# -*- coding: utf-8 -*-

# 美女图片 http://www.mm131.com
# 下载图片时，图片地址被重定向

from bs4 import BeautifulSoup
import requests
import os
from User_Agent import UserAgent
import random
# from urllib.request import urlretrieve
from io import BytesIO
from PIL import Image

if __name__ == '__main__':
    # Scrape gallery listings from mm131.com/xinggan and save every image
    # under ./mm131/. The site 302-redirects hot-linked image requests, so
    # image downloads send a Referer and refuse redirects (see below).

    s = requests.session()
    header = {
        "User-Agent": random.choice(UserAgent.USER_AGENTS)
    }

    # Create the output directory once, up front. exist_ok replaces the
    # original fragile `'mm131' not in os.listdir()` check, which only
    # inspected the current working directory and raced with itself.
    os.makedirs('mm131', exist_ok=True)

    for num in range(1, 10):
        # Page 1 has no numeric suffix; later listing pages are list_6_<n>.html.
        if num == 1:
            url = 'http://www.mm131.com/xinggan/'
        else:
            url = 'http://www.mm131.com/xinggan/list_6_%d.html' % num

        req = s.get(url=url, headers=header)
        req.encoding = 'gb2312'  # site serves GB2312-encoded HTML
        bf = BeautifulSoup(req.text, 'lxml')
        targetUrls = bf.find_all(class_='list-left public-box')
        bf2 = BeautifulSoup(str(targetUrls), 'lxml')
        targeturls2 = bf2.find_all('a', target='_blank')

        # Collect (gallery title, gallery URL) pairs for THIS page only.
        # BUG FIX 1: the original accumulated every page into one growing list
        # and re-iterated the whole list on every outer iteration, so page 1's
        # galleries were downloaded up to 9 times.
        # BUG FIX 2: storing a tuple replaces the original "title=url" joined
        # string, whose naive split('=') broke whenever the title or the URL
        # itself contained an '=' character.
        page_links = []
        for each in targeturls2:
            page_links.append((each.img.get('alt'), each.get('href')))

        print('第%d页链接采集完毕' % num)

        # Fetch the full-size images behind each gallery link on this page.
        for imgName, galleryUrl in page_links:
            print('下载：' + imgName)
            # A gallery spreads across several pages: x.html, x_2.html, ...
            for imgNum in range(1, 100):
                if imgNum == 1:
                    imgUrl = galleryUrl
                else:
                    imgUrl = galleryUrl.replace('.html', '_%d.html' % imgNum)
                print(imgUrl)

                try:
                    img_req = s.get(url=imgUrl, headers=header)
                    img_req.encoding = 'gb2312'
                    img_bf = BeautifulSoup(img_req.text, 'lxml')
                    content = img_bf.find('div', class_='content-pic')
                    # BUG FIX 3: past the gallery's last page the content block
                    # (or its <a>/<img>) disappears; stop paging instead of
                    # hammering up to 99 requests and relying on the exception
                    # handler to swallow an AttributeError each time.
                    if content is None or content.a is None or content.a.img is None:
                        break
                    img_url = content.a.img.get('src')
                    img_name = content.a.img.get('alt')

                    # Prefer the per-image alt text; fall back to the gallery
                    # title plus the page index when alt is missing/empty.
                    if img_name:
                        filename = img_name + '.jpg'
                    else:
                        filename = imgName + '_%d.jpg' % imgNum
                    print(img_url + '***' + filename)

                    # Save the image. A 302 here means the server rejected the
                    # hot-link despite the Referer; skip rather than save the
                    # redirect target.
                    try:
                        headers = {
                            "User-Agent": random.choice(UserAgent.USER_AGENTS),
                            "Referer": imgUrl
                        }
                        response = s.get(img_url, headers=headers, allow_redirects=False)
                        if response.status_code == 302:
                            print('allow_redirects=False')
                        else:
                            Image.open(BytesIO(response.content)).convert('RGB').save('mm131/' + filename)

                    except Exception as ee:
                        # Best-effort: a single bad image must not abort the run.
                        print(ee)

                except Exception as e:
                    # Network/parse failure on one page: log and try the next.
                    print(e)
