import requests
from bs4 import BeautifulSoup

# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
#                          'Chrome/51.0.2704.63 Safari/537.36'}
# http_prefix = 'http:'
#
#
# def next_page(soup):
#     if soup is None:
#         return None
#
#     navi = soup.select('.cp-pagenavi')
#     if navi is None or len(navi) == 0:
#         return None
#
#     span = None
#     for sub in navi[0].contents:
#         if sub.name == 'span':
#             span = True
#             continue
#
#         if sub.name == 'a' and span:
#             ret = sub.attrs['href']
#             if not ret.startswith(http_prefix):
#                 ret = http_prefix + ret;
#             return ret, sub.get_text().strip()
#
#     return None
#
#
# class meiZiTu:
#     def __init__(self):
#         self._url = "http://jandan.net/ooxx"
#         self._tasks = []
#
#     def start(self):
#         self.loop(self._url, page_title='')
#
#     def loop(self, page_url, **kwargs):
#         print("looping page url: ", page_url, kwargs['page_title'])
#
#         resp = requests.get(url=page_url, headers=headers)
#         if resp is None or resp.status_code != 200:
#             code = 0
#             if resp is not None:
#                 code = resp.status_code
#
#             raise Exception(url=page_url, resp=resp, code=code)
#
#         soup = BeautifulSoup(resp.text, 'lxml')
#         nav, title = next_page(soup)
#         if nav is None:
#             print("crawling finished at url ", page_url)
#             return
#         self.loop(nav, page_title=title)
#
#
#
# if __name__ == '__main__':
#     print("Hello There!")
#
#     mzt = meiZiTu()
#     mzt.start()
#
#
# class task:
#     def __init__(self):
#         self._url = ''
#         self._id = ''


'''
这是煎蛋网的图片
Hello There!
looping page url:  http://jandan.net/ooxx 
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMjM=#comments 23
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMjI=#comments 22
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMjE=#comments 21
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMjA=#comments 20
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTk=#comments 19
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTg=#comments 18
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTc=#comments 17
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTY=#comments 16
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTU=#comments 15
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTQ=#comments 14
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTM=#comments 13
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTI=#comments 12
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTE=#comments 11
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMTA=#comments 10
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtOQ==#comments 9
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtOA==#comments 8
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtNw==#comments 7
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtNg==#comments 6
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtNQ==#comments 5
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtNA==#comments 4
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMw==#comments 3
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMg==#comments 2
looping page url:  http://jandan.net/ooxx/MjAyMTA1MjgtMQ==#comments 1
Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "D:\Program Files\PyCharm 2021.1.1\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "D:\Program Files\PyCharm 2021.1.1\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 64, in <module>
    mzt.start()
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 38, in start
    self.loop(self._url, page_title='')
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 56, in loop
    self.loop(nav, page_title=title)
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 56, in loop
    self.loop(nav, page_title=title)
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 56, in loop
    self.loop(nav, page_title=title)
  [Previous line repeated 20 more times]
  File "E:/my/python-study/demo/爬xiaojiejie案例/demo.py", line 52, in loop
    nav, title = next_page(soup)
TypeError: cannot unpack non-iterable NoneType object
'''

import urllib.request
from lxml import etree


def create_requset(page, base_url='https://www.mzitu.com/page/'):
    """Build an urllib Request for one listing page of the gallery site.

    Args:
        page: 1-based page number; appended to *base_url*.
        base_url: listing URL prefix (default is the original hard-coded
            site; parameterized so other listing endpoints can be crawled).

    Returns:
        urllib.request.Request with Referer and User-Agent headers set —
        the site rejects requests that lack them.
    """
    url = base_url + str(page)

    headers = {
        'Referer': 'https://www.mzitu.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3679.0 Safari/537.36',
    }
    return urllib.request.Request(url=url, headers=headers)


def get_content(request):
    """Fetch the page for *request* and report the final URL.

    Args:
        request: a prepared urllib.request.Request.

    Returns:
        (content, referer): the body decoded as UTF-8, and the URL after
        any redirects (later used as the Referer when downloading images).

    Raises:
        urllib.error.URLError: on connection or HTTP failure.
    """
    # `with` closes the connection deterministically instead of leaking
    # the socket until garbage collection.
    with urllib.request.urlopen(request) as response:
        content = response.read().decode('utf-8')
        referer = response.geturl()
    return content, referer


def get_img_url(content):
    """Extract image titles and lazy-load URLs from a listing page.

    The site lazy-loads thumbnails, so the real image URL is carried in
    the ``data-original`` attribute rather than ``src``.

    Args:
        content: the listing page HTML as a string.

    Returns:
        (alt_list, src_list): parallel lists — image titles and their
        corresponding image URLs.
    """
    doc = etree.HTML(content)
    titles = doc.xpath('//ul/li/a/img/@alt')
    sources = doc.xpath('//ul/li/a/img/@data-original')
    return titles, sources


def download_img(alt_list, src_list, referer, img_dir='./img'):
    """Download every image in *src_list* into *img_dir*.

    Args:
        alt_list: image titles; the part before any '?' becomes the
            file name.
        src_list: image URLs, parallel to *alt_list*.
        referer: value for the Referer header — the site returns an
            error for requests without it.
        img_dir: target directory; created if it does not exist
            (default matches the original hard-coded './img').
    """
    import os

    # Original code crashed if ./img was missing; create it up front.
    os.makedirs(img_dir, exist_ok=True)

    # Build and install the opener once — the original rebuilt and
    # globally re-installed it on every iteration.
    opener = urllib.request.build_opener()
    opener.addheaders = [
        ('Referer', referer),
        ('User-Agent',
         'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3679.0 Safari/537.36'),
    ]
    urllib.request.install_opener(opener)

    for alt, url in zip(alt_list, src_list):
        file_name = os.path.join(img_dir, alt.split('?')[0] + '.jpg')
        urllib.request.urlretrieve(url=url, filename=file_name)


def _main():
    """Prompt for a page range and crawl each listing page in order."""
    start_page = int(input('请输入起始页码(最大255)：'))
    end_page = int(input('请输入截止页码(最大255)：'))
    for page in range(start_page, end_page + 1):
        request = create_requset(page)
        content, referer = get_content(request)
        alt_list, src_list = get_img_url(content)
        download_img(alt_list, src_list, referer)
    print('爬取完成！！！！')


if __name__ == '__main__':
    _main()
