import bs4
import time
import requests
import fake_useragent
import os
import re
import sys
import xlrd
import random

# sys.setdefaultencoding('utf-8')  # u -> str
# sys.setdefaultencoding('gbk')  # u -> str
# First page of the "mmtp" listing index (crawl entry point).
seed = 'http://www.mmonly.cc/mmtp/list_9_1.html'

# Sample gallery page; also used as the default Referer header in the fetch helpers below.
url = 'http://www.mmonly.cc/mmtp/xgmn/244934.html'


# http://www.mmonly.cc/mmtp/xgmn/244934.html

# body > div:nth-child(2) > div.photo > div.wrapper.clearfix.imgtitle > div.pages > ul > li:nth-child(1) > a


def get_img_sum_num(img_url):
    """Fetch a gallery page and scrape its page-count text.

    :param img_url: URL of the gallery's first HTML page
    :return: the digits extracted from the pager label (e.g. "共9页" -> "9")
    """
    headers = {'User-Agent': fake_useragent.UserAgent().random,
               'Referer': url}
    response = requests.get(img_url, headers=headers, )
    response.encoding = 'gbk'  # site serves GBK-encoded pages
    page = bs4.BeautifulSoup(response.text, 'html.parser')

    # Hard-coded anchor index (56th <a>) — depends entirely on the site layout.
    label = page.find_all('a')[55].text.strip()
    return label[1:-2]


# get_img_sum_num(url)

# Build the URL of every page in one photo set
def get_img_urls(base, sum):
    """Expand a gallery's first-page URL into the URLs of all its pages.

    Page 1 is ``.../NNNNNN.html``; page i (i > 1) is ``.../NNNNNN_i.html``.

    :param base: URL of the gallery's first page (ends with ".html")
    :param sum: total number of pages in the gallery
    :return: list of page URLs, page 1 first
    """
    stem, ext = base[:-5], base[len(base) - 5:]  # split off the ".html" suffix
    return [stem + ('' if page == 1 else '_' + str(page)) + ext
            for page in range(1, sum + 1)]


# get_img_urls(url,9)


def convert2imgurl(html_url):
    """Resolve one gallery HTML page to the direct image URL it embeds.

    :param html_url: URL of a single gallery page
    :return: the ``src`` attribute of the page's second <img> tag
             (the main photo — index is site-layout dependent)
    """
    fa = fake_useragent.UserAgent()
    headers = {'User-Agent': fa.random,
               'Referer': html_url}
    request = requests.get(html_url, headers=headers)
    request.encoding = 'gbk'  # site serves GBK-encoded pages
    soup = bs4.BeautifulSoup(request.text, 'html.parser')
    # Fixed: the original called soup.find_all('img') twice (once to print,
    # once to return) — traverse the parse tree only once.
    src = soup.find_all('img')[1].attrs['src']
    print(src)
    return src


convert2imgurl(url)


def down_image(img_urls):
    """Download every image in *img_urls* into a folder named from the first URL.

    The folder name is "<parent-dir>-<grandparent-dir>" of the first image URL.
    If that folder already exists the whole set is skipped (dedup guard).

    :param img_urls: list of direct image URLs belonging to one gallery
    """
    if not img_urls:  # robustness: nothing to download
        return
    parts = img_urls[0].split('/')
    img_name = str(parts[-2]) + '-' + str(parts[-3])
    if os.path.exists(img_name):  # 查重 如果这个文件夹存在则跳过 防止重复下载
        time.sleep(1)
        print(img_name + '存在')
        return
    os.mkdir(img_name)
    for img_url in img_urls:
        fa = fake_useragent.UserAgent()
        headers = {'User-Agent': fa.random,
                   'Referer': url}
        request = requests.get(img_url, headers=headers)

        # .content is the raw response bytes; .text would decode to str.
        with open(img_name + u'/' + img_url.split('/')[-1], 'wb') as f:
            f.write(request.content)
        # Fixed: removed the redundant f.close() inside the `with` block, and
        # moved the print/politeness-sleep outside it so the file handle is
        # not held open during the (up to 2 s) delay.
        print("已保存" + img_name + '/' + img_url.split('/')[-1])
        time.sleep(random.random() * 2)


def convert2imgurls(html_urls):
    """Map each gallery-page URL to its direct image URL.

    :param html_urls: iterable of gallery page URLs
    :return: list of image ``src`` URLs, one per page, in input order
    """
    # Idiom: comprehension replaces the manual append loop (same calls, same order).
    return [convert2imgurl(page_url) for page_url in html_urls]


def down_onet(url):
    """Download one complete gallery given its first-page URL.

    Best-effort: any failure is logged together with the offending URL and
    the crawl moves on to the next gallery.

    :param url: URL of the gallery's first HTML page
    """
    try:
        page_count = int(get_img_sum_num(url))
        html_pages = get_img_urls(url, page_count)
        print(type(html_pages))
        print(html_pages)
        image_urls = convert2imgurls(html_pages)
        down_image(image_urls)
    except Exception as err:
        print(err.args)
        print('当前url:' + url)


base_url = 'http://www.mmonly.cc/mmtp/list_9_'
# Listing pages 1..785 of the gallery index.
list_seeds = [base_url + str(page) + '.html' for page in range(1, 786)]

firsturl = list_seeds[0]
print('firsturl:', firsturl)


def get_imgs(firsturl):
    """Scrape one listing page and return its gallery links, de-duplicated.

    Every gallery appears twice in the infinite-scroll container (thumbnail
    anchor + title anchor, adjacent in document order), so the final href
    list keeps only every second entry.

    :param firsturl: URL of a listing page
    :return: de-duplicated list of gallery-page hrefs
    """
    headers = {'User-Agent': fake_useragent.UserAgent().random,
               'Referer': url}
    response = requests.get(firsturl, headers=headers, )
    response.encoding = 'gbk'  # site serves GBK-encoded pages
    page = bs4.BeautifulSoup(response.text, 'html.parser')

    container = page.find('div', id=re.compile('infinite_scroll'))
    anchors = container.find_all('a', target=re.compile('_blank'))
    # Debug peek at the first three links.
    print(anchors[0].attrs.get('href'))
    print(anchors[1].attrs.get('href'))
    print(anchors[2].attrs.get('href'))
    print(len(anchors))
    hrefs = []
    for idx, anchor in enumerate(anchors):
        print(idx)
        print(anchor.attrs.get('href'))
        hrefs.append(anchor.attrs.get('href'))
    # Each link shows up exactly twice in order — keep every second one.
    return hrefs[::2]

# Debug run: list the galleries found on the first seed page...
res = get_imgs(firsturl)
print(res)
print(len(res))
# ...then walk every listing page and download each gallery it links to.
for seed_page in list_seeds:
    down_onet(seed_page)
