# 爬取狗狗网站
import os
from urllib.parse import urljoin

import numpy as np
import requests
from lxml import etree

ACCESS_BASE_PATH = './'
BASE_PATH = './爬取文件/'  # 用来保存文件的地址
SAVE_PATH = './爬取文件/files'

index_url = 'https://sougou.com/'
heads = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}


def exe_url(resourceUrl, base_url='https://sougou.com/'):
    """Resolve a scraped href/src value into an absolute, fetchable URL.

    Handles protocol-relative ('//host/...'), root-relative ('/...'),
    dot-relative ('./...') and bare relative paths; absolute http(s)
    URLs are returned unchanged.

    :param resourceUrl: raw URL attribute value taken from the page
    :param base_url: site root to resolve relative paths against
        (defaults to the crawled index URL)
    :return: absolute URL string
    """
    if resourceUrl.startswith('//'):
        # Protocol-relative URL: only the scheme is missing.
        return 'https:' + resourceUrl
    if resourceUrl.startswith(('http://', 'https://')):
        # Already absolute — this is the format we want.
        return resourceUrl
    # urljoin normalizes './x' and '/x' correctly; the old manual
    # concatenation produced 'https://sougou.com//x' (double slash)
    # for './x' because only the leading '.' was dropped.
    return urljoin(base_url, resourceUrl)


def get_internal_web():
    """Crawl the index page, mirror each internal page plus its assets,
    and write a localized copy of the index page to disk.
    """
    if not os.path.exists(SAVE_PATH):
        print('> 目标目录不存在，创建：', SAVE_PATH)
        os.makedirs(SAVE_PATH)
    index_result = requests.get(url=index_url, headers=heads)
    index_page_text = index_result.text
    html = etree.HTML(index_page_text)
    url_list = html.xpath('//a/@href')  # links to other HTML pages
    url_list = np.unique(url_list)  # de-duplicate hrefs before crawling
    print(url_list)

    # Visit each internal link and mirror the page it points to.
    for resourceUrl in url_list:
        # Skip in-page anchors and links that leave the site.
        # (was: bitwise `|` on booleans — works, but `or` is the idiom
        # and short-circuits)
        if resourceUrl == '#' or resourceUrl.startswith('http'):
            continue
        if resourceUrl.startswith('/'):
            # Strip only the leading slash(es); the old replace('/', '')
            # also removed slashes inside nested paths like '/a/b'.
            resourceUrl = resourceUrl.lstrip('/')
        # Base name for the saved page; fall back to the whole name when
        # the URL has no '.' (the old split('.')[-2] raised IndexError
        # for extensionless hrefs such as 'about').
        name_parts = resourceUrl.strip().split('.')
        file_name = name_parts[-2] if len(name_parts) > 1 else name_parts[-1]
        print(file_name)

        # Create a per-page directory that will hold this page's assets.
        FILE_PATH = BASE_PATH + file_name + '_files'
        if not os.path.exists(FILE_PATH):
            print('> 目标目录不存在，创建：', FILE_PATH)
            os.makedirs(FILE_PATH)

        # Rewrite the link in the index page to point at the local copy.
        # NOTE(review): plain substring replace may also hit other URLs
        # containing this text — inherited behavior, verify if it matters.
        index_page_text = index_page_text.replace(resourceUrl, './' + file_name + '.html')

        # Resolve to an absolute URL and mirror the page itself.
        resourceUrl = exe_url(resourceUrl)
        get_index(resourceUrl, FILE_PATH, file_name)

    # Localize the index page's own stylesheets, scripts and images.
    index_page_text = get_link(index_page_text, SAVE_PATH)
    index_page_text = get_script(index_page_text, SAVE_PATH)
    index_page_text = get_imgs(index_page_text, SAVE_PATH)
    index_page_text = index_page_text.replace('/./', './')
    with open(f'./爬取文件/狗狗.html', mode='w', encoding='utf-8') as index_file:
        index_file.write(index_page_text)  # save the localized index page
    index_result.close()
    print('over')


def get_index(resourceUrl, FILE_PATH, file_name):
    """Download one internal page, localize its assets, and save it.

    :param resourceUrl: absolute URL of the page to fetch
    :param FILE_PATH: directory where the page's assets are stored
    :param file_name: base name (without extension) for the saved HTML file
    """
    result = requests.get(url=resourceUrl, headers=heads)
    page_text = result.text
    # Localize every stylesheet, script and image the page references.
    page_text = get_link(page_text, FILE_PATH)
    page_text = get_script(page_text, FILE_PATH)
    page_text = get_imgs(page_text, FILE_PATH)
    # The original printed this same path three times in a row;
    # once is enough for the progress log.
    print(f'./爬取文件/{file_name}.html')
    with open(f'./爬取文件/{file_name}.html', mode='w', encoding='utf-8') as file:
        file.write(page_text)  # save the localized page source
    result.close()
    print('over')


# Parse and download the css/js resources referenced via <link href>.
def get_link(page_text, FILE_PATH):
    """Fetch all <link href> resources and localize their references.

    :param page_text: HTML source to scan and rewrite
    :param FILE_PATH: directory the downloaded files are written to
    :return: page_text with each href replaced by its local path
    """
    tree = etree.HTML(page_text)
    href_list = tree.xpath('//link/@href')  # css / js files
    print(href_list)

    # Local reference prefix: FILE_PATH with the save-dir segment removed.
    local_prefix = FILE_PATH.replace('爬取文件/', '')
    for href in href_list:
        saved_name = href.strip().split('/')[-1]
        print(saved_name)
        # Point the page at the local copy before downloading it.
        page_text = page_text.replace(href, local_prefix + '/' + saved_name)
        absolute_url = exe_url(href)
        response = requests.get(url=absolute_url, headers=heads)
        with open(f'{FILE_PATH}/{saved_name}', mode='w', encoding='utf-8') as out:
            out.write(response.text)  # save the downloaded resource
        response.close()
    return page_text


def get_script(page_text, FILE_PATH):
    """Fetch every external <script src> file and localize its reference.

    :param page_text: HTML source to scan and rewrite
    :param FILE_PATH: directory the downloaded scripts are written to
    :return: page_text with each src replaced by its local path
    """
    doc = etree.HTML(page_text)
    src_list = doc.xpath('//script/@src')  # external script files
    print(src_list)

    # Local reference prefix: FILE_PATH with the save-dir segment removed.
    prefix = FILE_PATH.replace('爬取文件/', '')
    for src in src_list:
        local_name = src.strip().split('/')[-1]
        print(local_name)
        # Rewrite the reference to the soon-to-exist local copy.
        page_text = page_text.replace(src, prefix + '/' + local_name)
        fetch_url = exe_url(src)
        resp = requests.get(url=fetch_url, headers=heads)
        with open(f'{FILE_PATH}/{local_name}', mode='w', encoding='utf-8') as fh:
            fh.write(resp.text)  # save the downloaded script
        resp.close()
    return page_text


def get_imgs(page_text, FILE_PATH):
    """Fetch every <img src> resource and localize its reference.

    Images are binary, so the response body is written in 'wb' mode.

    :param page_text: HTML source to scan and rewrite
    :param FILE_PATH: directory the downloaded images are written to
    :return: page_text with each src replaced by its local path
    """
    doc = etree.HTML(page_text)
    img_sources = doc.xpath('//img/@src')
    print(img_sources)

    # Local reference prefix: FILE_PATH with the save-dir segment removed.
    prefix = FILE_PATH.replace('爬取文件/', '')
    for src in img_sources:
        img_name = src.strip().split('/')[-1]
        print(img_name)
        # Rewrite the reference to the soon-to-exist local copy.
        page_text = page_text.replace(src, prefix + '/' + img_name)
        download_url = exe_url(src)
        resp = requests.get(url=download_url, headers=heads)
        with open(f'{FILE_PATH}/{img_name}', mode='wb') as fh:
            fh.write(resp.content)  # raw image bytes
        resp.close()
    return page_text


# Script entry point: mirror the index page and its linked pages locally.
if __name__ == "__main__":
    get_internal_web()
