#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: 
@Author: klww
@Time: 2020/05/23 17:33:25
@version: 0.1
'''

import requests
from fake_useragent import UserAgent
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import re
import os
import random
import time

index = 'https://wallhaven.cc'  # site root URL
ua = UserAgent()  # random User-Agent generator (fake_useragent)
# Category flags, concatenated into a 3-digit mask (one digit per category):
# General = 100
Anime = 10
# People = 1
# cat = '00%d' % cat if cat < 10 else '0%d' % cat if cat < 100 else cat
categories = 111  # 111 = General + Anime + People all enabled
# Purity flags, same 3-digit mask scheme:
# SFW = 100
# Sketchy = 10
# NSFW = 1
purity = 100  # SFW only
resolutions = '1920x1080'  # exact resolution filter
atleast = '1920x1080'  # NOTE(review): defined but never included in params below
ratios = '16x9'  # aspect-ratio filter
topRange = '1w'  # toplist time window: past week
sorting = 'toplist'
order = 'desc'
# Query-string parameters substituted into the toplist URL template below.
params = {
    'categories': categories,
    'purity': purity,
    'resolutions': resolutions,
    'ratios': ratios,
    'topRange': topRange,
    'sorting': sorting,
    'order': order
}
# Search/toplist URL template; a page number is appended at run time.
topList = 'https://wallhaven.cc/search?categories={categories}\
&purity={purity}&resolutions={resolutions}&ratios={ratios}&topRange={topRange}\
&sorting={sorting}&order={order}&page='.format(**params)
classificationDict = {'search': {'url': '%s' % topList}}  # info about the site's category sub-pages
interval = 0.1  # pause between recording individual image links (seconds)
firstDir = 'D:/void/Haven'  # root download directory
linksTxt = 'D:/void/links.txt'  # file that accumulates image links
hrefList = []  # NOTE(review): appears unused in this file
haven = 'wallhaven'  # hostname fragment used when building full-image URLs


# Fetch the target URL and turn the page into a BeautifulSoup object


def screen(url, spe):
    """Fetch *url* and parse the response into a BeautifulSoup object.

    Args:
        url: page address to fetch.
        spe: a SoupStrainer limiting which tags get parsed.

    Returns:
        BeautifulSoup on success, or an empty list (falsy) on any
        request failure, so callers can test ``if not soup``.
    """
    s = requests.Session()
    # Retry transient failures up to 3 times on both schemes.
    s.mount('http://', HTTPAdapter(max_retries=3))
    s.mount('https://', HTTPAdapter(max_retries=3))
    headers = ua.random  # fresh random User-Agent string per request
    try:
        # (connect, read) timeouts in seconds.
        html = s.get(url=url, headers={'User-Agent': headers}, timeout=(3, 2))
    except requests.exceptions.RequestException:
        # Narrowed from a bare Exception: only network/HTTP errors are
        # expected here; anything else should surface as a bug.
        print('Connect time out!')
        return []
    html.encoding = 'utf-8'  # force the site's encoding before reading .text
    return BeautifulSoup(html.text, 'lxml', parse_only=spe)


# Register the home page's three top-level categories in the dictionary


def init_classification():
    """Scrape the site start page and register its first three
    top-level categories in ``classificationDict``.

    Each entry maps the category's display text to a dict holding a
    ``url`` template ending in ``?page=`` so that a page number can be
    appended later.
    """
    spe = SoupStrainer(class_='startpage-menu')
    soup = screen(index, spe)
    if not soup:
        # screen() already reported the failure; nothing to register.
        return
    # Only the first three anchors in the start-page menu are categories.
    for link in soup.select('a')[0:3]:
        # No ``global`` statement needed: the module-level dict is
        # mutated in place, not rebound.
        classificationDict[link.get_text()] = {
            'url': link['href'] + '?page='
        }

# UI entry point: let the user pick a category to download


def ui():
    """Entry menu.

    The interactive category picker was disabled by the author; the
    toplist 'search' pseudo-category is downloaded unconditionally.
    Re-introduce a prompt over ``classificationDict`` keys here if
    interactive selection is wanted again.
    """
    select_classification('search')

# Crawl the chosen category's listing pages


def select_classification(choice):
    """Crawl one classification's listing pages and record/download
    every wallpaper found, then download everything from the links file.

    Args:
        choice: key into ``classificationDict`` (e.g. 'search').
    """
    print('   ---------------------------')
    print('--------------' + choice + '-------------')
    print('   ---------------------------')
    baseUrl = classificationDict[choice]['url']
    while True:
        care = input('Did you care about data?[y/n]')
        if care not in ['y', 'n']:
            print('Input error,pls try again')
        else:
            print('Enjoying!')
            break
    for i in range(1, 2):
        # BUGFIX: build each page URL from the base. The original did
        # ``url += '%d' % i`` on a single string, so page 2 would have
        # requested '...page=12' instead of '...page=2'.
        url = baseUrl + '%d' % i
        select = 'a.preview,div.thumb-info'
        spe = SoupStrainer(class_=re.compile('preview|thumb-info'))
        soup = screen(url, spe)
        if not soup:
            print('Connect time out!')
            return
        for c in soup.select(select):
            # Anchors carry the detail-page link; thumb-info divs carry
            # the format label consumed via next_element below.
            if 'a' == c.name:
                src = c['href']
                print(src)
                if care == 'y':
                    # Visit the detail page so file size can be checked.
                    judgeSize(src)
                else:
                    # Derive the direct image URL from the thumbnail label.
                    suffix = c.next_element.get_text()
                    switchToRealUrl(src, suffix)
    downloadFromFile()

# Check the picture's file size before recording it


def judgeSize(src):
    """Open a wallpaper detail page, skip images larger than 5 MiB,
    and record the direct image URL otherwise.

    Args:
        src: URL of the wallpaper's detail page.
    """
    select = 'dd,#wallpaper'
    spe = SoupStrainer(['dd', 'img'])
    soup = screen(src, spe)
    if not soup:
        return
    soup = soup.select(select)
    size = soup[2].string  # third <dd> holds the size text, e.g. '5.30 MiB'
    print(size)
    if 'MiB' in size:
        # BUGFIX: the original pattern '(:?\d+\.\d*|\d*)' mistyped the
        # non-capturing group '(?:' and its '\d*' branch could match an
        # empty string, making float('') raise ValueError. This pattern
        # extracts the first real number, if any.
        match = re.search(r'\d+(?:\.\d+)?', size)
        if match and float(match.group()) > 5.0:
            print('The picture is so big!')
            print()
            return
    src = soup[-1]['src']  # the #wallpaper <img> carries the full-size URL
    print(src)
    downloadLinks(src)

# Assemble the real (full-size) image URL


def switchToRealUrl(src, suffix):
    """Rewrite a detail-page link into the direct full-image URL and
    hand it to ``downloadLinks``.

    Args:
        src: detail-page URL whose last six characters are the image id.
        suffix: thumbnail label text; contains 'PNG' for PNG images.
    """
    # The thumbnail label only distinguishes PNG; everything else is JPEG.
    ext = 'png' if 'PNG' in str(suffix) else 'jpg'
    image_id = src[-6:]
    shard = image_id[:2]  # full images live under the id's first two chars
    full = src.replace(haven, 'w.%s' % haven)
    full = full.replace('/w/', '/full/%s/' % shard)
    full = full.replace(image_id, '%s-%s' % (haven, image_id))
    full = '%s.%s' % (full, ext)
    print(full)
    downloadLinks(full)

# Append the link to the links file


def downloadLinks(src):
    """Append one image URL to ``linksTxt``, then pause for ``interval``
    seconds to throttle scraping.

    Args:
        src: direct image URL to record.
    """
    with open(linksTxt, 'a') as link_file:
        link_file.write('%s\n' % src)
    print('Link has downloaded!')
    print()
    time.sleep(interval)

# Download a single picture


def download(href):
    """Download one image URL into ``firstDir``.

    The local filename is the part of the URL after the last '-'
    (e.g. 'wallhaven-abc123.jpg' -> 'abc123.jpg'). Already-downloaded
    files are skipped.

    Args:
        href: direct image URL ending in .jpg or .png.
    """
    # Validate the argument BEFORE using it; the original ran the regex
    # on str(href) first and only then checked isinstance.
    if not isinstance(href, str):
        return
    # No ``global firstDir`` needed: the module global is only read.
    matches = re.findall(r'(?<=-).*\.(?:jpg|png)', href)
    if not matches:
        # URL does not look like a wallhaven image link; the original
        # would have raised IndexError here.
        return
    test = firstDir + '/' + matches[0]
    if os.path.exists(test):
        print('Picture has exist!')
        print()
        return
    # Timeout added so a stalled server cannot hang the script forever.
    response = requests.get(href, timeout=(3, 30))
    print(response.status_code)
    with open(test, 'wb') as pic:
        for chunk in response.iter_content(128):
            pic.write(chunk)

# Read image URLs from the links file and download each one


def downloadFromFile():
    """Download every image URL recorded in ``linksTxt``.

    Delegates each link to ``download``, which already performs the
    filename extraction, the already-exists check, and the streamed
    write — the original duplicated that whole body here. Blank lines
    in the file are skipped.
    """
    with open(linksTxt, 'r') as links:
        # Iterate the file object directly instead of readlines():
        # same lines, no need to materialize the whole file in memory.
        for line in links:
            src = line.strip()
            if src:
                download(src)


def main():
    """Ensure the download directory exists, then start the UI flow."""
    # makedirs creates missing parent directories too; the original
    # os.mkdir raised FileNotFoundError when 'D:/void' did not exist,
    # and FileExistsError was avoided only by the manual exists() check.
    os.makedirs(firstDir, exist_ok=True)
    # init_classification()  # only needed for the interactive category picker
    ui()


if __name__ == '__main__':
    main()
# downloadFromFile()
