import os
import sys
import time
import traceback

import yaml
import requests
from bs4 import BeautifulSoup
from urllib import parse
import urllib3

# --- Module-level state and settings ---
max_page = 0  # highest search-result page number; set by get_pageInfo()
max_pic = 0  # total number of pictures found; set by get_pageInfo()
last_url = ''  # final (post-redirect) URL of the latest request; set by get_html()
# Default download directory (overwritten by read_config())
pic_path = 'download_pictures/'
# Reverse-proxy prefix prepended to download links (empty string = direct download)
proxy_url = ''
# proxy_url = 'https://cors.eu.org/'

# Silence urllib3's InsecureRequestWarning (all requests use verify=False)
urllib3.disable_warnings()


# Timestamp helper used by the logging functions.
def get_now_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())


# Exception handler: report and persist the active traceback.
def error_log():
    """Print the current exception traceback and append it to '错误日志.txt'.

    Must be called from inside an ``except`` block so that
    ``traceback.format_exc()`` has an active exception to format.
    """
    msg = traceback.format_exc()
    # Print the traceback once (the original printed it twice in a row).
    print('出大问题：', msg)
    # Append a timestamped entry followed by a separator line.
    with open('错误日志.txt', 'a', encoding='utf-8') as f:
        f.write(f'[{get_now_time()}]: {msg}')
        f.write(f'{"-" * 40}\n')


# Read the YAML config file.
def read_config(path):
    """Load the YAML config and return the effective search parameters.

    Side effect: sets the global ``pic_path`` to
    ``<save_path><search_tag>[OrderBy<order_by>]\\`` (Windows-style
    separator; parser_url() and download_pic() split on '\\').

    :param path: path to the YAML configuration file
    :return: dict of config entries with ``save_path`` and all empty
             (None-valued) entries removed; expected to contain at least
             ``search_tag`` and ``order_by``.
    """
    with open(path, encoding='utf-8') as f:
        # NOTE(review): FullLoader is acceptable for a local, trusted config
        # file; use yaml.safe_load if the config could come from untrusted input.
        datas = yaml.load(f, Loader=yaml.FullLoader)
    save_path = datas['save_path']  # fixed typo: was 'sava_path'
    # Drop 'save_path' and every empty entry so only query parameters remain.
    # A set avoids the duplicate-delete KeyError the original list hit when
    # 'save_path' itself was None.
    drop_keys = {'save_path'} | {k for k, v in datas.items() if v is None}
    for key in drop_keys:
        del datas[key]
    print('读取配置成功：', datas)
    global pic_path
    pic_path = save_path + datas['search_tag'] + '[OrderBy' + datas['order_by'] + ']\\'
    return datas


# Fetch a web page.
def get_html(url, datas):
    """GET *url* with *datas* as query parameters and return the page text.

    Side effect: stores the final (post-redirect) URL in the global
    ``last_url``. Returns '' on any request failure (the error is logged).

    :param url: request URL
    :param datas: dict of query-string parameters
    :return: decoded response body, or '' on failure
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
    }
    # Configure retries BEFORE issuing the request (the original set this
    # afterwards, where it only affected later calls). The discarded
    # requests.session() the original created has been removed as dead code.
    requests.adapters.DEFAULT_RETRIES = 5
    try:
        # verify=False: skip SSL certificate validation (warnings are
        # disabled at module import).
        r = requests.get(url=url, params=datas, headers=header, verify=False)
    except Exception:
        error_log()
        return ''
    global last_url
    last_url = r.url
    # Let requests guess the encoding from the response content.
    r.encoding = r.apparent_encoding
    return r.text


# Parse the search-result summary.
def get_pageInfo(html):
    """Parse the entry page and set the globals ``max_page`` and ``max_pic``.

    Exits the program with a friendly message when the search has no results.

    :param html: HTML text of the first search-result page
    """
    global max_page
    global max_pic
    soup = BeautifulSoup(html, 'html.parser')
    # Pagination paragraph; absent when the search returned nothing.
    page_list = soup.find(name='p', attrs={'class': 'numeric_pages'})
    if page_list is None:
        # The original crashed with AttributeError here on an empty result;
        # exit with the same message the zero-count branch uses.
        print('没有搜索到结果，请重新修改搜索标签！')
        sys.exit()
    # The parent element's text starts with the total picture count.
    max_pic = int(page_list.parent.text.strip().split(' ')[0])
    if max_pic == 0:
        print('没有搜索到结果，请重新修改搜索标签！')
        sys.exit()
    # The second-to-last pagination link holds the highest page number.
    max_page = int(page_list.findAll()[-2].string)


# Build the URL of every search-result page.
def get_page_url():
    """Return the URLs of result pages 0..max_page, derived from ``last_url``."""
    # Split the entry URL once: only the page number changes between pages.
    parts = last_url.split('?')
    prefix = parts[0].split('view_posts/')[0] + 'view_posts/'
    query = parts[1]
    return [f'{prefix}{page}?{query}' for page in range(max_page + 1)]


# Resolve and download every picture, writing a progress log as it goes.
def parser_url(page_list):
    """Walk every result page, resolve each picture's direct download link,
    download it via download_pic(), and record progress in a log file named
    after ``pic_path`` (suffix '-下载日志.txt', with '||'/'&&' sanitized).

    :param page_list: list of search-result page URLs from get_page_url()
    """
    page_n = 0  # current page counter (reported 0-based in the log)
    pic_n = 1  # running picture index (1-based)
    # Derive the log-file name from the download directory path.
    global pic_path
    url_list = pic_path.split('\\')
    url_list[-2] += '-下载日志.txt'
    # [:-1] drops the trailing backslash; '||'/'&&' are invalid in file names.
    log_name = '\\'.join(url_list)[:-1].replace('||', '[or]').replace('&&', '[and]')
    print(log_name)
    with open(log_name, 'w+', encoding='utf-8') as f:
        log = f'本次任务概览：\n爬取到分页数：{max_page}, 总图片数：{max_pic}\n入口页面：{parse.unquote(last_url)}\n保存路径：{pic_path}\n{"-" * 20}\n'
        f.write(log)
        # Outer loop: fetch and parse each search-result page.
        for page_url in page_list:
            page_text = get_html(page_url, datas={})
            soup = BeautifulSoup(page_text, 'html.parser')
            # Inner loop: follow each picture's detail-page link on this page.
            for i in soup.find('div', attrs={'class': 'posts_block'}).find_all('span'):
                pic_url = 'https://anime-pictures.net' + i.find('a')['href'].strip()
                pic_page = get_html(pic_url, datas={})
                # Extract the direct download link from the detail page.
                soup2 = BeautifulSoup(pic_page, 'html.parser')
                download_link = 'https://anime-pictures.net' + soup2.find('a', attrs={'id': 'download_image_link'})[
                    'href']
                # Decode percent-encoded characters in the URL.
                download_link = parse.unquote(download_link)
                # Download the picture, tagged with its running index.
                download_pic(download_link, pic_n)
                # Record index, page, timestamp and link in the log file.
                download_log = f'第[{pic_n}/{max_pic}]张, 第[{page_n}/{max_page}]页, [{get_now_time()}]\n{download_link}'
                f.write(download_log + '\n')
                print(download_log)
                pic_n += 1
            page_n += 1


# Download a single picture.
def download_pic(download_link, pic_n):
    """Download one picture into ``pic_path``, skipping files that exist.

    :param download_link: absolute URL of the image file
    :param pic_n: 1-based index used to prefix the saved file name
    """
    # '||' and '&&' are not valid in directory names; replace them so the
    # directory can always be created.
    path = pic_path.replace('||', '[or]').replace('&&', '[and]')
    if not os.path.exists(path):
        os.makedirs(path)
    filename = '[' + str(pic_n) + ']' + download_link.split('/')[-1]
    path = path + filename
    # Skip files that were already downloaded (guard clause).
    if os.path.exists(path):
        print('该文件已存在，跳过处理！')
        return
    # Optionally route the download through the reverse proxy.
    download_link = proxy_url + download_link
    try:
        # verify=False: skip SSL certificate validation.
        r = requests.get(download_link, verify=False)
        # Fail on HTTP errors so an error page is never written to disk
        # as if it were the image (the original saved whatever came back).
        r.raise_for_status()
        with open(path, 'wb+') as f:
            f.write(r.content)
    except Exception as e:
        print('出大问题，但别慌：', e)


# Script entry point: read config.yml, resolve the entry page, then crawl.
if __name__ == '__main__':
    # Page 0 of the search listing; query parameters come from the config.
    entry_url = 'https://anime-pictures.net/pictures/view_posts/0?'
    config = read_config('config.yml')
    print('正在解析入口链接...')
    entry_html = get_html(entry_url, config)
    get_pageInfo(entry_html)
    # get_pageInfo() already exits on an empty result; this is a second guard.
    if max_pic == 0:
        print('搜索结果为空，请前往配置文件更换检索词！')
        sys.exit()
    result_pages = get_page_url()
    print('解析成功！')
    parser_url(result_pages)
