# -*- coding:utf-8 -*-

# Yixiu image site scraper  http://www.tu11.com/xingganmeinvxiezhen/

from bs4 import BeautifulSoup
from urllib import request
import requests
import os
import time
import random
import json
from User_Agent import UserAgent
from http import cookiejar

if __name__ == '__main__':
    # Single session so connections (and any cookies) are reused across requests.
    s = requests.session()

    # --- Phase 1: collect album links from the listing pages. ---
    # Each collected entry is the string "<album title>=<absolute album url>".
    list_url = []
    headers = {}
    for num in range(1, 3):
        # url = 'http://www.tu11.com/meituisiwatupian/list_2_%d.html' % num
        url = 'http://www.tu11.com/qingchunmeinvxiezhen/list_4_%d.html' % num

        # Rotate the User-Agent on every listing request.
        headers = {
            "User-Agent": random.choice(UserAgent.USER_AGENTS)
        }

        req = s.get(url=url, headers=headers)
        req.encoding = 'gb2312'  # site pages are GB2312-encoded
        bf = BeautifulSoup(req.text, 'lxml')
        # Every album teaser on a listing page sits in a 'shupic' element.
        for each in bf.find_all(class_='shupic'):
            urlStr = each.p.a.get('title') + '=http://www.tu11.com' + each.a.get('href')
            print(urlStr)
            list_url.append(urlStr)

        print('链接采集完成')

    # --- Phase 2: download every image of every collected album. ---
    # Create the output directory once, up front (idempotent), instead of
    # re-checking os.listdir() for every single image.
    os.makedirs('tu11', exist_ok=True)

    for each_img in list_url:
        # maxsplit=1: an '=' inside the album title must not corrupt the URL part.
        filename, album_url = each_img.split('=', 1)
        print('下载：' + filename)

        # Albums are paginated; probe up to 30 pages.
        # Page 1 is '<name>.html', page N is '<name>_N.html'.
        nameNum = 0
        for urlNum in range(1, 30):
            if urlNum == 1:
                target_url = album_url
            else:
                target_url = album_url.replace('.html', '_%d.html' % urlNum)
            print(target_url)

            try:
                nameNum += 1
                img_req = s.get(url=target_url, headers=headers)
                img_req.encoding = 'gb2312'
                img_bf = BeautifulSoup(img_req.text, 'lxml')

                # The full-size images live inside <div class="nry">; collect them
                # all directly rather than re-parsing the stringified tag list.
                img_urls = []
                for nry in img_bf.find_all('div', class_='nry'):
                    img_urls.extend(nry.find_all('img'))
                print(img_urls)

                nameNum2 = 0
                for each_img_url in img_urls:
                    urlStr = each_img_url.get('src')
                    nameNum2 += 1
                    nameStr = filename + '_%d_%d.jpg' % (nameNum, nameNum2)
                    try:
                        print(urlStr)
                        print(nameStr)
                        request.urlretrieve(url=urlStr, filename='tu11/' + nameStr)
                    except Exception as e:
                        # Best-effort: log the failed image and keep downloading.
                        print(e)
                    time.sleep(1)  # be polite to the server

            except Exception as e:
                # A missing pagination page (404 etc.) lands here; keep probing
                # the remaining page numbers rather than aborting the album.
                print(e)






