# -*- coding: utf-8 -*-
# Check a page's character encoding in the browser console: document.charset
# Crawl target: news websites (e.g. The Seattle Times, LA Times)

import requests
import os
import re
import numpy as np
from lxml import etree
from bs4 import BeautifulSoup
from urllib.parse import urlparse

first_path = ''
second_path = 'files'  # 用来保存文件的地址
url_list_result = []

no_access_url_list = []  # 拒绝访问的网址（黑名单）

index_url_list = [
    # 'https://www.military.com/',           # 有一个 js 不好下载，可能超时
    # 'https://www.bostonglobe.com/',        # no 图片无法加载，乱码
    # 'https://www.globaltimes.cn/',         # no 前端代码存在的加载方式：load_file("index2.html");  无法展示
    # 'https://abcnews.go.com/',             # no
    # 'https://www.arabnews.com/',           # no
    # 'https://www.foxnews.com/',            # no
    # 'https://www.seattletimes.com/',       # no
    # 'https://tass.com/',                   # no
    # 'https://www.zaobao.com/',             # no 乱码
    # 'https://www.newsday.com/',            # no 一闪而过，动态加载 js
    # 'https://www.rt.com/',                 # no 把图片隐藏在 <img> 标签的 data-src 属性中, 修改方式：覆盖 src 属性
    # 'https://www.cnbc.com/',               # no 存在乱码  由于 ‘xxx.jpg?v=1703638428&w=960&h=730&vtcrop=y’  后面字符串不同的问题，会导致替换失败；部分图片动态加载
    # 'https://www.miamitodaynews.com/',     # .jpg 后面有字母的话，图片不会显示(已经解决)
    # 'https://www.usnews.com/',             # 网站太大了
    # 'https://www.boston.com/',
    # 'https://edition.cnn.com/',
    'https://www.latimes.com/',            # 存在视频，比较好爬取，不过 在 <source> 标签中
    # 'https://www.cbsnews.com/',
    # 'https://therealnews.com/',
]

heads = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}


# 获取网址后缀
def get_domain_suffix_using_urlparse(url):
    parsed_url = urlparse(url)
    domain = parsed_url.netloc
    suffix = domain.split('.')[-1] if domain else None
    return suffix


# 生成域名
def get_domain(url):
    parsed_url = urlparse(url)
    return parsed_url.netloc


# 拼接完整路径
def exe_url(resourceUrl, index_url):
    if resourceUrl.startswith('./'):
        resourceUrl = index_url + resourceUrl[1:]
    elif resourceUrl.startswith('//'):
        resourceUrl = 'https:' + resourceUrl
    elif resourceUrl.startswith('/'):
        resourceUrl = index_url + resourceUrl[1:]
    elif resourceUrl.startswith('http') or resourceUrl.startswith('https'):
        # 不处理，这是我们想要的url格式
        pass
    else:
        resourceUrl = index_url + resourceUrl
    return resourceUrl


# 新建爬取网站的文件目录
def creat_file(path_param):
    if not os.path.exists(path_param):
        print(' > 目标目录不存在，创建：', path_param)
        os.makedirs(path_param)
    else:
        pass


# 根据网址形成文件名称
def creat_file_name(webset_param):
    new_file_name = ''
    if (webset_param.endswith('index.html')):
        new_file_name = webset_param.split('/')[-2] + webset_param.split('/')[-1]
    elif (webset_param.endswith('.html')):
        new_file_name = webset_param.split('/')[-1]
    elif (webset_param.endswith('/')):
        new_file_name = webset_param.split('/')[-2] + '.html'
    elif ('/' not in webset_param):
        new_file_name = webset_param + '.html'
    else:
        new_file_name = webset_param.split('/')[-2] + webset_param.split('/')[-1] + '.html'
    if (new_file_name.startswith('region=world')):
        new_file_name = new_file_name.split('region=world')[-1]
    return replace_special_str(new_file_name)


# 特殊字符去除
def replace_special_str(new_file_name):
    replacements = {
        '\\': '', '/': '', ':': '', '*': '', '?': '', '<': 'g', '>': 'f',
        '|': '', '%': 'a', '#': 'b', '+': 'c', '&': 'd', '=': 'e', '-': 'h'
    }

    # 检查是否需要替换，并执行替换操作
    new_file_name = new_file_name.translate(str.maketrans(replacements))

    if (len(new_file_name) > 50):
        new_file_name = new_file_name[len(new_file_name) - 40:]
    return new_file_name


# 格式化访问路径
def formate_url(url_param):
    # 定义一个映射扩展名和其对应值的字典
    extension_mapping = {
        '.js': '.js', '.css': '.css', '.io': '.io', '.rss': '.rss', '.ico': '.ico', '.jpg': '.jpg', '.JPG': '.JPG',
        '.jpeg': '.jpeg', '.JPEG': '.JPEG', '.png': '.png', '.PNG': '.PNG', '.gif': '.gif', '.GIF': '.GIF', '.mp4': '.mp4', '.MP4': '.MP4',
        '.svg': '.svg', '.SVG': '.SVG'
    }
    for ext, replace_ext in extension_mapping.items():
        if ext in url_param:
            url_param = url_param.split(ext)[0] + replace_ext

    return url_param


# 访问网址，并爬取
def file_access_website(access_url, path_param, file_name):
    try:
        # 判断文件是否存在
        if (os.path.exists(f'{path_param}/{file_name}')):
            pass
        else:
            print('爬取网站中······', access_url)
            result = requests.get(url=access_url, headers=heads)
            with open(f'{path_param}/{file_name}', mode='w', encoding='utf-8') as file:
                file.write(result.text)  # 读取爬到网页的源代码
            result.close()
            print('爬取结束！！！')
    except Exception as e:
        print('错误信息：', e)
        no_access_url_list.append(access_url)
        pass
    else:
        pass
    finally:
        pass


# 删除图片的响应式加载
def del_resp_imgs(page_text_param):
    # 删除 <source> 标签
    # pattern = re.compile(r'<source[^>]+>', re.S)
    # page_text_param = pattern.sub('', page_text_param)

    soup = BeautifulSoup(page_text_param, 'html.parser')
    # 删除 <img> 标签中的 srcset 属性

    # print('================== img ========================')
    # 查找所有的<img>标签
    img_tags = soup.find_all('img')

    # 遍历每个<img>标签，并删除srcset属性
    for img in img_tags:
        if 'srcset' in img.attrs:
            del img['srcset']
        if 'data-srcset' in img.attrs:
            del img['data-srcset']
    #     if 'data-src' in img.attrs:
    #         del img['data-src']
    #
    # page_text_param = soup.prettify()

    # print('================== video ========================')
    # 查找所有的<video>标签
    video_tags = soup.find_all('video')

    # 遍历每个<img>标签，并删除srcset属性
    for video in video_tags:
        if 'poster' in video.attrs:
            del video['poster']
            video.attrs['controls'] = ''

    page_text_param = soup.prettify()

    # print('================== source ========================')
    # 查找所有的<source>标签
    # img_tags = soup.find_all('source')
    #
    # # 遍历每个<img>标签，并删除srcset属性
    # for img in img_tags:
    #     if 'srcset' in img.attrs:
    #         del img['srcset']
    #
    #         # 打印修改后的HTML
    # print(soup.prettify())

    return page_text_param


# 解析需要下载的css、js文件路径
def get_link(page_text, path_param, url_param):
    html = etree.HTML(page_text)
    url_list = html.xpath('//link/@href')  # css、js 文件
    script_list = html.xpath('//script/@src')  # css、js 文件
    url_list.extend(script_list)
    print("link 合法 url_list: ", len(url_list))

    # 遍历下载css、js文件
    for resourceUrl in url_list:
        if ((resourceUrl not in no_access_url_list) & ('.com' not in resourceUrl)):
            access_resourceUrl = exe_url(resourceUrl, url_param)
            if ((re.match(r'(\w+):\/\/.+$', access_resourceUrl))):
                print("link 合法url: ", access_resourceUrl)
                access_resourceUrl = formate_url(access_resourceUrl)
                file_name = replace_special_str(access_resourceUrl.strip())
                if (('http' in resourceUrl) | ('//' in resourceUrl)):
                    page_text_back = page_text
                    page_text = page_text.replace(resourceUrl, f'./files/{file_name}')
                    # 判断是否替换成功
                    if (page_text == page_text_back):
                        page_text = page_text.replace(access_resourceUrl, f'./files/{file_name}')
                else:
                    page_text = page_text.replace('"' + resourceUrl + '"', '"' + f'./files/{file_name}' + '"')

                if (('.js' in access_resourceUrl) | ('.css' in access_resourceUrl) | ('.ico' in access_resourceUrl)):
                    # 访问网址
                    # 判断文件是否存在
                    if (os.path.exists(f'{path_param}/{file_name}')):
                        print(f'{path_param}/{file_name}', '文件存在！！！')
                        pass
                    elif (access_resourceUrl not in no_access_url_list):
                        file_access_website(access_resourceUrl, path_param, file_name)
                elif (('.jpg' in resourceUrl) | ('.JPG' in resourceUrl) | ('.jpeg' in resourceUrl) | (
                        '.JPEG' in resourceUrl) | ('.png' in resourceUrl) | ('.PNG' in resourceUrl)):
                    try:
                        # 判断文件是否存在
                        if (os.path.exists(f'{path_param}/{file_name}')):
                            print('文件存在！！')
                            pass
                        else:
                            print('访问图片！！！', access_resourceUrl)
                            result = requests.get(url=access_resourceUrl, headers=heads)
                            with open(f'{path_param}/{file_name}', mode='wb') as file:
                                file.write(result.content)  # 读取爬到网页的源代码
                            result.close()
                            print('图片保存成功！！')
                    except Exception as e:
                        print('错误信息：', e)
                        no_access_url_list.append(access_resourceUrl)
                        pass
                    else:
                        pass
                    finally:
                        pass
    return page_text


def get_imgs(page_text, path_param, url_param):
    html = etree.HTML(page_text)
    url_list = html.xpath('//img/@src')
    # imgs_srcset_list = html.xpath('//img/@srcset')
    # imgs_data_srcset_list = html.xpath('//img/@data-srcset')
    # imgs_data_src_list = html.xpath('//img/@data-src')
    videos_url_list = html.xpath('//video/@src')
    source_url_list = html.xpath('//source/@src')
    source_data_src_url_list = html.xpath('//source/@data-src')
    srcset_url_list = html.xpath('//source/@srcset')

    # url_list.extend(imgs_srcset_list)
    # url_list.extend(imgs_data_srcset_list)
    # url_list.extend(imgs_data_src_list)
    url_list.extend(videos_url_list)
    url_list.extend(source_url_list)
    url_list.extend(source_data_src_url_list)
    url_list.extend(srcset_url_list)
    # print("imgs 合法 url_list: ", len(url_list))
    print("imgs 合法 url_list: ", url_list)

    # 遍历下载 img 文件
    for resourceUrl in url_list:
        if (resourceUrl not in no_access_url_list):
            access_resourceUrl = formate_url(resourceUrl)
            suffix_list = ['js', 'css', 'io', 'rss', 'ico', 'jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG', 'gif', 'GIF',
             'svg', 'SVG', 'mp4', 'MP4', 'm3u8']
            file_name = ''
            if(access_resourceUrl.split('.')[-1] in suffix_list):
                file_name = replace_special_str(access_resourceUrl.strip())
            else:
                file_name = replace_special_str(access_resourceUrl.strip()) + '.jpg'
            print("原生 url: ", resourceUrl, "imgs 合法url: ", access_resourceUrl, f'{path_param}/{file_name}')
            if (('http' in resourceUrl) | ('//' in resourceUrl)):
                page_text_back = page_text
                page_text = page_text.replace(resourceUrl, f'./files/{file_name}')
                # 判断是否替换成功
                if (page_text == page_text_back):
                    page_text = page_text.replace(access_resourceUrl, f'./files/{file_name}')
            else:
                page_text = page_text.replace('"' + resourceUrl + '"', '"' + f'./files/{file_name}' + '"')

            try:
                # 判断文件是否存在
                if (os.path.exists(f'{path_param}/{file_name}')):
                    print(f'{path_param}/{file_name}', '文件存在！！！')
                    pass
                else:
                    print('访问图片！！！')
                    access_resourceUrl = exe_url(access_resourceUrl, url_param)
                    result = requests.get(url=access_resourceUrl, headers=heads)
                    creat_file(path_param)
                    with open(f'{path_param}/{file_name}', mode='wb') as file:
                        file.write(result.content)  # 读取爬到网页的源代码
                    print('文件保存成功！！')
                    result.close()
            except Exception as e:
                print('错误信息：', e)
                no_access_url_list.append(resourceUrl)
                pass
            else:
                pass
            finally:
                pass

    return page_text


# 03. 爬取网页 html 文件
# False: 是否是第二层网址
# url_suffix: 域名后缀
# website: 网址
# domain_name: 域名
# index_url: 爬取网址路径
# first_path: 文件存放路径
# file_name: 生成文件名称
def get_internal_web(flag, url_suffix, website_param, domain_name, url_param, path_param, file_name_param):
    if (domain_name in url_param):
        try:
            # 01. 新建爬取网站的文件
            creat_file(path_param)

            # 02. 访问网页信息，解析出该网页所包含的url信息
            index_result = requests.get(url=url_param, headers=heads)
            index_page_text = index_result.text
            # print(index_page_text)
            html = etree.HTML(index_page_text)
            url_list = html.xpath('//a/@href')  # html 文件
            url_list = np.unique(url_list)
            print('该网站包含的网址：', url_list)

            # 遍历首页中包含的 url_list
            for resourceUrl in url_list:
                if (resourceUrl.startswith('#') | (resourceUrl.startswith('/') & (len(resourceUrl) < 2))):
                    pass
                elif ((len(resourceUrl.split(url_suffix)[-1]) == 0) | (resourceUrl.startswith('domain_name'))):
                    # 替换网址信息
                    index_page_text = index_page_text.replace('<a href="' + resourceUrl + '"', '<a href="' + '#' + '"')
                    pass
                else:
                    new_file_webset = exe_url(resourceUrl, website_param)

                    # 递归访问网页：二层
                    if (flag):
                        url_list_result.append(new_file_webset)
                    # 无限爬取
                    # url_list_result.append(new_file_webset)

                    # 跳转网页的 html 文件
                    print(new_file_webset)
                    new_file_name = creat_file_name(new_file_webset)
                    print('原网址：', resourceUrl, '拼接后的网址：', new_file_webset, file_name_param, '新生成的文件名：',
                          new_file_name)

                    # 替换网址信息
                    index_page_text = index_page_text.replace('"' + resourceUrl + '"', '"' + './' + new_file_name + '"')

            index_page_text = get_imgs(index_page_text, path_param, website_param)
            index_page_text = get_link(index_page_text, path_param, website_param)
            # print(index_page_text)
            index_page_text = index_page_text.replace('/./', './')
            # 更新响应式图片解析方式
            index_page_text = del_resp_imgs(index_page_text)
            soup = BeautifulSoup(index_page_text, 'html.parser')
            video_container = soup.find('div', class_='video-js vjs-16-9 skin')  # 定位video所在的div容器
            if video_container:  # 如果找到了<video>标签
                if video_container.find('video').find_next_sibling('div'):  # 如果找到了<div>标签
                    # 移除<div>标签和其内容，并将修改后的HTML内容打印出来
                    video_container.find('video').find_next_sibling('div').decompose()  # 删除第一个div标签
                    index_page_text = soup.prettify()
                else:
                    print("No <div> tag found after <video> tag.")
            else:
                print("No <video> tag found in the HTML.")

            with open(f'./{path_param.split("/")[1]}/{file_name_param}', mode='w', encoding='UTF-8') as index_file:
                index_file.write(index_page_text)  # 读取爬到网页的源代码
            index_result.close()
            print(len(url_list_result))
        except Exception as e:
            print('错误信息：', e)
            no_access_url_list.append(url_param)
            pass
        else:
            pass
        finally:
            pass
    print('over')


if __name__ == "__main__":
    for index_url in index_url_list:
        # 获取网址后缀
        url_suffix = "." + get_domain_suffix_using_urlparse(index_url)
        if (isinstance(index_url, list)):
            pass
        elif (index_url.startswith("http://") or index_url.startswith("https://")):
            first_path = './' + index_url.split(url_suffix)[-2].split('/')[-1].split('.')[
                -1] + '_files/files'  # 存放js、css、imgs 文件
            domain_name = ''  # 存放 “域名”
            if ('www.' in index_url):
                domain_name = index_url.split(url_suffix)[0].split('www.')[1]
            else:
                domain_name = index_url.split(url_suffix)[0].split('https://')[1]
            file_name = index_url.split(url_suffix)[-2].split('/')[-1].split('.')[-1] + '.html'  # 生成的文件名称
            print('新建的文件名：', index_url.split(url_suffix)[-2].split('/')[-1].split('.')[-1] + '.html')
            website = index_url  # 网址路径
            get_internal_web(True, url_suffix, website, domain_name, index_url, first_path, file_name)
            # i = 0
            # for child_url in url_list_result:
            #     i = i + 1
            #     print(f'{i}二层网址：', child_url)
            #     get_internal_web(False, url_suffix, website, domain_name, child_url, first_path, creat_file_name(child_url))
            print('over')

    # 爬取视频
    # index_url = 'https://www.latimes.com/environment/00000188-ea98-d427-a18a-fbf9ba930001-123'
    # url_suffix = "." + get_domain_suffix_using_urlparse(index_url)
    # website = get_domain(index_url)
    # domain_name = ''
    # if ('www.' in index_url):
    #     domain_name = index_url.split(url_suffix)[0].split('www.')[1]
    # else:
    #     domain_name = index_url.split(url_suffix)[0].split('https://')[1]
    # first_path = './' + website.split(url_suffix)[-2].split('/')[-1].split('.')[-1] + '_files/files'  # 存放js、css、imgs 文件
    # file_name = creat_file_name(index_url)  # 生成的文件名称
    # # print(False, url_suffix, website, domain_name, index_url, first_path, file_name)
    # get_internal_web(False, url_suffix, website, domain_name, index_url, first_path, file_name)
