#!/usr/bin/env python3
# coding=utf-8
# 查看网页编码格式：document.charset
# 爬取网站

import requests
import os
import re
import numpy as np
from lxml import etree
from urllib import parse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options


first_path = ''  # per-site root save path; recomputed for each site in __main__
second_path = 'files'  # directory name used to save downloaded files

url_list_result = []  # second-level page URLs collected while crawling index pages

no_access_url_list = []  # URLs that refused access (blacklist, skipped on later passes)

index_url_list = ['https://www.globaltimes.cn/',
                  'https://www.zaobao.com/'
                  ]

# Desktop-Chrome User-Agent so sites serve the normal (non-bot) page.
heads = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}


# 拼接完整路径
# Build the full absolute URL for a resource reference
def exe_url(resourceUrl, index_url):
    """Resolve a (possibly relative) resource URL against the page URL.

    Handles protocol-relative ('//host/...'), root-relative ('/path'),
    same-directory ('./path') and bare relative references; absolute
    http(s) URLs are returned unchanged.

    :param resourceUrl: href/src value extracted from the page
    :param index_url:   URL of the page the resource was found on
    :return: absolute URL string
    """
    # Protocol-relative references inherit https explicitly.
    if resourceUrl.startswith('//'):
        return 'https:' + resourceUrl
    # Already absolute — this is the format we want.
    if resourceUrl.startswith(('http://', 'https://')):
        return resourceUrl
    # urljoin handles './', '/', and bare relative paths per RFC 3986 and
    # fixes the doubled slash ('site.com//x') and missing slash
    # ('site.comx') that the previous manual concatenation produced
    # depending on whether index_url ended with '/'.
    return parse.urljoin(index_url, resourceUrl)


# 新建爬取网站的文件目录
# Create the directory tree used to store the crawled site
def creat_file(path_param):
    """Ensure the download directory *path_param* exists.

    :param path_param: directory path to create if missing
    """
    if not os.path.exists(path_param):
        print(' > 目标目录不存在，创建：', path_param)
    # exist_ok=True removes the check-then-create race; the exists() test
    # above is kept only for the log message.
    os.makedirs(path_param, exist_ok=True)


# 根据网址形成 html 文件名称
# Derive a local .html file name from a page URL
def creat_file_name(webset_param):
    """Build the html file name a crawled page is saved under.

    :param webset_param: full URL of the page
    :return: sanitized file name ending in '.html'
    """
    parts = webset_param.split('/')
    if webset_param.endswith('index.html'):
        # Prefix with the parent path segment so index pages stay unique.
        name = parts[-2] + parts[-1]
    elif webset_param.endswith('.html'):
        name = parts[-1]
    elif webset_param.endswith('/'):
        name = parts[-2] + '.html'
    else:
        name = parts[-2] + parts[-1] + '.html'
    # Drop a leading 'region=world' query fragment if one slipped through.
    if name.startswith('region=world'):
        name = name.split('region=world')[-1]
    return replace_special_str(name)


# 特殊字符去除
# Strip characters that are illegal in file names
def replace_special_str(new_file_name):
    """Remove the characters \\ / : * ? < > | from *new_file_name*.

    :param new_file_name: candidate file name
    :return: sanitized file name
    """
    # str.translate removes all eight characters in one C-level pass.
    # The old membership pre-check plus eight chained .replace() calls was
    # redundant: .replace() on an absent character is already a no-op.
    return new_file_name.translate(str.maketrans('', '', '\\/:*?<>|'))


# 格式化访问路径
# Normalize an asset URL: cut everything after the asset extension
def formate_url(url_param):
    """Truncate *url_param* right after the first recognised asset
    extension, dropping query strings / cache-buster suffixes.

    e.g. 'https://x/app.js?v=3' -> 'https://x/app.js'

    The extensions are checked in the original order; the original's
    verbatim duplicate .js/.css/.ico checks were removed (a second pass
    over a URL already ending in the extension is a no-op, so behaviour
    is unchanged).

    :param url_param: asset URL, possibly with trailing query string
    :return: URL cut off right after the extension
    """
    for ext in ('.js', '.css', '.ico',
                '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG'):
        # NOTE(review): 'in' is a substring test, so '.js' also matches
        # e.g. '.json' — preserved from the original behaviour.
        if ext in url_param:
            url_param = url_param.split(ext)[0] + ext
    return url_param


# 访问网址，并爬取
# Fetch a text resource (html/css/js) and save it to disk
def file_access_website(access_url, path_param, file_name):
    """Download *access_url* as text and save it as path_param/file_name.

    Failed URLs are appended to the module-level ``no_access_url_list``
    blacklist so later passes skip them.  Existing files are never
    overwritten.

    :param access_url: absolute URL to fetch
    :param path_param: directory to save into (must already exist)
    :param file_name:  target file name inside path_param
    """
    print('下载文件：', path_param + file_name, '     访问网址：', access_url)
    try:
        result = requests.get(url=access_url, headers=heads)
        try:
            # Round-trip through unicode_escape to turn \xNN escapes into
            # percent-encoding, then unquote — recovers mangled non-ASCII
            # text (kept from the original implementation).
            page_text = parse.unquote(
                result.text.encode('unicode_escape')
                .decode('utf-8', 'ignore')
                .replace('\\x', '%'))
            if not os.path.exists(f'{path_param}/{file_name}'):
                with open(f'{path_param}/{file_name}', mode='w', encoding='utf-8') as file:
                    file.write(page_text)
        finally:
            # Close the response even if decoding/writing fails (the old
            # code leaked it on any exception after the GET).
            result.close()
    except Exception:
        # Blacklist and continue crawling — but unlike the old bare
        # 'except:', do not swallow KeyboardInterrupt/SystemExit.
        no_access_url_list.append(access_url)


# 解析需要下载的css、js文件路径
# Parse and download the css/js/ico files referenced by <link> tags
def get_link(page_text, path_param, url_param):
    """Download css/js/ico assets from <link href=...> tags and rewrite
    their URLs in *page_text* to the local relative path.

    :param page_text:  HTML source of the page
    :param path_param: directory the asset files are written into
    :param url_param:  URL of the page (to absolutise relative hrefs)
    :return: page_text with asset URLs rewritten
    """
    print('下载 link 标签中的文件')
    tree = etree.HTML(page_text)
    href_list = tree.xpath('//link/@href')  # css / js assets
    print('link 标签: ', href_list)

    for href in href_list:
        # Skip blacklisted URLs and any stray nested-list entries.
        if href in no_access_url_list or isinstance(href, list):
            continue
        href = exe_url(href, url_param)
        if not (href.startswith("http://") or href.startswith("https://")):
            continue
        # This pass only handles css / js / ico resources.
        if not (('.js' in href) | ('.css' in href) | ('.ico' in href)):
            continue
        print("link 标签中合法url: ", href)
        href = formate_url(href)
        file_name = replace_special_str(href.strip())
        # A "suffix" longer than 5 chars means there was no real extension.
        if len(file_name.split('.')[-1]) > 5:
            continue
        # Rewrite the page to point at the local copy (path relative to
        # the saved html file).
        local_ref = path_param.replace(path_param.split("/")[1] + '/', '') + '/' + file_name
        page_text = page_text.replace(href, local_ref)
        # Second exe_url pass is a no-op on the already-absolute URL
        # (kept from the original).
        href = exe_url(href, url_param)
        file_access_website(href, path_param, file_name)

    return page_text


# Parse and download the js files referenced by <script> tags
def get_script(page_text, path_param, url_param):
    """Download the js files from <script src=...> tags and rewrite their
    URLs in *page_text* to the local relative path.

    :param page_text:  HTML source of the page
    :param path_param: directory the script files are written into
    :param url_param:  URL of the page (to absolutise relative srcs)
    :return: page_text with script URLs rewritten
    """
    tree = etree.HTML(page_text)
    src_list = tree.xpath('//script/@src')  # script sources
    print('script 标签： ', src_list)

    for src in src_list:
        # Skip blacklisted URLs and any stray nested-list entries.
        if src in no_access_url_list or isinstance(src, list):
            continue
        src = exe_url(src, url_param)
        if not (src.startswith("http://") or src.startswith("https://")):
            continue
        print("script 标签中的合法 url")
        src = formate_url(src)
        file_name = replace_special_str(src.strip())
        print(file_name)
        # A "suffix" longer than 5 chars means there was no real extension.
        if len(file_name.split('.')[-1]) > 5:
            continue
        # Rewrite the page to point at the local copy.
        local_ref = path_param.replace(path_param.split("/")[1] + '/', '') + '/' + file_name
        page_text = page_text.replace(src, local_ref)
        # No-op on the already-absolute URL (kept from the original).
        src = exe_url(src, url_param)
        file_access_website(src, path_param, file_name)

    return page_text


# Parse and download the images referenced by <img> tags
def get_imgs(page_text, path_param, url_param):
    """Download <img src=...> images as binary files and rewrite their
    URLs in *page_text* to the local relative path.

    :param page_text:  HTML source of the page
    :param path_param: directory the image files are written into
    :param url_param:  URL of the page (to absolutise relative srcs)
    :return: page_text with image URLs rewritten
    """
    print('下载 img 图片文件')
    html = etree.HTML(page_text)
    url_list = html.xpath('//img/@src')
    meta_img_list = html.xpath('//meta/@content')
    # NOTE(review): .append() adds meta_img_list as ONE nested list, which
    # the isinstance() check below then skips entirely, so meta contents
    # are never downloaded.  Preserved as-is: switching to .extend() would
    # change behaviour and also feed non-URL meta values into the loop.
    url_list.append(meta_img_list)
    print('imgs 标签： ', url_list)

    for resourceUrl in url_list:
        # Skip blacklisted URLs and the nested list appended above.
        if resourceUrl in no_access_url_list or isinstance(resourceUrl, list):
            continue
        resourceUrl = exe_url(resourceUrl, url_param)
        if not (resourceUrl.startswith("http://") or resourceUrl.startswith("https://")):
            continue
        print("imgs 合法url: ", resourceUrl)
        resourceUrl = formate_url(resourceUrl)
        file_name = replace_special_str(resourceUrl.strip())
        print(file_name)
        # A "suffix" longer than 5 chars means there was no real extension.
        if len(file_name.split('.')[-1]) > 5:
            continue
        # Rewrite the page to point at the local copy.
        page_text = page_text.replace(
            resourceUrl,
            path_param.replace(path_param.split("/")[1] + '/', '') + '/' + file_name)
        resourceUrl = exe_url(resourceUrl, url_param)
        try:
            result = requests.get(url=resourceUrl, headers=heads)
            try:
                # Images are binary: write result.content, never overwrite.
                if not os.path.exists(f'{path_param}/{file_name}'):
                    with open(f'{path_param}/{file_name}', mode='wb') as file:
                        file.write(result.content)
            finally:
                result.close()
        except Exception:
            # Blacklist and continue — narrowed from the old bare
            # 'except:' so Ctrl-C still interrupts the crawl; the dead
            # 'else/finally: pass' arms were dropped.
            no_access_url_list.append(resourceUrl)

    return page_text


# 03. Crawl a site's html page.
#           flag:            level marker — whether to keep collecting next-level page URLs
#           domain_name:     prefix check — only crawl URLs belonging to this site
#           url_param:       the page URL to crawl
#           file_path_param: relative directory for saved js/css/img assets
#           file_name_param: file name the html page is saved under
def get_internal_web(flag, domain_name, url_param, file_path_param, file_name_param):
    """Crawl one page of the target site and save it locally.

    Only the directory creation and a Selenium smoke test are currently
    active; the real crawl/rewrite/save pipeline below is commented out.

    :param flag: when True, discovered page URLs are appended to the
        module-level url_list_result for a second-level pass
    :param domain_name: URL prefix the page must match to be crawled
    :param url_param: URL of the page to fetch
    :param file_path_param: directory for downloaded js/css/img assets
    :param file_name_param: name of the html file to write
    """
    if (url_param.startswith(domain_name)):
        # 01. Create the directory for the crawled files.
        creat_file(file_path_param)

        # 02. Fetch the page and extract the URLs it contains.
        # NOTE(review): PhantomJS support was removed in Selenium 4 —
        # this call fails on modern Selenium; presumably a leftover
        # experiment (it also fetches baidu.com, not url_param) — confirm.
        driver = webdriver.PhantomJS()
        driver.get('http://www.baidu.com/')

        print(driver.page_source)
        print(url_param)




        # The original requests-based pipeline, disabled:
        # index_result = requests.get(url=url_param, headers=heads)
        # index_page_text = parse.unquote(index_result.text.encode('unicode_escape').decode('utf-8').replace('\\x', '%'))
        # print(index_page_text)
        #
        # html = etree.HTML(index_page_text)
        # url_list = html.xpath('//a/@href')  # html 文件
        #
        # urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
        #                   index_page_text)
        # print(urls)
        # # 把所有 url 整一起
        # url_list.extend(urls)
        # unique_list = np.unique(url_list)
        # print('该网站包含的网址：', len(unique_list), unique_list)
        #
        # # 遍历首页中包含的 url_list
        # for resourceUrl in url_list:
        #     if (resourceUrl.startswith('#') | (resourceUrl.startswith('/') & (len(resourceUrl) < 2))):
        #         pass
        #     elif ((len(resourceUrl.split('.com')[-1]) == 0) | (resourceUrl.startswith('domain_name'))):
        #         # 替换网址信息
        #         index_page_text = index_page_text.replace('<a href="' + resourceUrl + '"', '<a href="' + '#' + '"')
        #         pass
        #     else:
        #         new_file_webset = exe_url(resourceUrl, url_param)
        #         print('拼接后的网址：', new_file_webset)
        #         if (new_file_webset in url_list_result):
        #             pass
        #         else:
        #             # 递归访问网页
        #             if (flag):
        #                 url_list_result.append(new_file_webset)
        #             # get_internal_web(url_param, file_path_param)
        #             # 跳转网页的 html 文件
        #             new_file_name = creat_file_name(new_file_webset)
        #             print('传过来的文件名：', file_name_param)
        #             if (new_file_name.startswith('region=world')):
        #                 new_file_name = new_file_name.split('region=world')[-1]
        #             print('新生成的文件名：', new_file_name)
        #
        #             # 替换网址信息
        #             index_page_text = index_page_text.replace(resourceUrl, './' + new_file_name)
        #
        # print(len(url_list_result))
        # index_page_text = get_link(index_page_text, file_path_param, url_param)
        # index_page_text = get_script(index_page_text, file_path_param, url_param)
        # index_page_text = get_imgs(index_page_text, file_path_param, url_param)
        # index_page_text = index_page_text.replace('/./', './')
        # index_page_text = index_page_text.replace('\\n', '')
        # if (file_name_param.startswith('region=world')):
        #     file_name_param = file_name_param.split('region=world')[-1]
        # with open(f'./{file_path_param.split("/")[1]}/{file_name_param}', mode='w', encoding='UTF-8') as index_file:
        #     index_file.write(index_page_text)  # 读取爬到网页的源代码
        # index_result.close()
        print('over')




if __name__ == "__main__":
    for index_url in index_url_list:
        if (isinstance(index_url, list)):
            pass
        elif (index_url.startswith("http://") or index_url.startswith("https://")):
            try:
                # 域名
                domain = index_url.split('.')[-1].split('/')[0]
                # 文件存放地址
                first_path = './' + index_url.split('.' + domain)[-2].split('/')[-1].split('.')[-1] + '_files/files'
                domain_name = index_url.split('.' + domain)[0]
                # 开始访问网页
                get_internal_web(True, domain_name, index_url, first_path,
                                 index_url.split('.com')[-2].split('/')[-1].split('.')[-1] + '.html')
                # i = 1
                # for index_url in url_list_result:
                #     i = i + 1
                #     print(f'{i}二层网址：', index_url)
                #     get_internal_web(False, domain_name, index_url, first_path, creat_file_name(index_url))
                print('over')

            except:
                pass
            else:
                pass
            finally:
                pass
