# 爬取妹子图
from tkinter import *
import requests
import os
from bs4 import BeautifulSoup
import threading

# Base URL of the target site; also sent as the Referer header so the
# site's hotlink protection accepts the image requests.
MZITU = 'https://www.mzitu.com'

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'

HEADERS = {
    'User-Agent': USER_AGENT,
    'Referer': MZITU}

# Root directory images are saved under (Windows path).
# The explicit '\\' fixes the invalid escape sequence '\妹' of the original
# literal (a DeprecationWarning today, a SyntaxError in future Python);
# the runtime value is byte-identical.
SAVE_PATH = 'D:\\妹子图\\'

# NOTE(review): this module-level flag appears unused — Mzitu keeps its own
# self.isDowning. Kept for backward compatibility with any external reader.
isDowning = False


class Mzitu:
    """Tkinter GUI that scrapes image galleries from mzitu.com.

    One button toggles a background daemon thread. The thread walks the
    paginated gallery index, then every page of every gallery, saving each
    image below SAVE_PATH/<gallery name>/<page>.jpg.
    """

    def __init__(self, root):
        self.root = root          # Tk root window the button is packed into
        self.isDowning = False    # True while the worker thread should keep running
        self.init_view()

    def init_view(self):
        """Build the single start/stop button."""
        # Use self.root (not the module-level `root` global) so the class
        # works with whatever Tk instance it was constructed with.
        self.btn = Button(self.root, text="开始下载", command=self.start_download)
        self.btn.pack()

    def start_download(self):
        """Toggle downloading: spawn a daemon worker thread, or signal stop."""
        if not self.isDowning:
            self.isDowning = True  # plain bool, not tkinter's TRUE constant
            self.btn['text'] = '停止下载'
            self.t2 = threading.Thread(target=self.get_girl_list, args=(MZITU,))
            # Attribute assignment replaces the deprecated setDaemon();
            # a daemon thread dies with the GUI process.
            self.t2.daemon = True
            self.t2.start()
            self.root.update()
        else:
            # The original constructed SystemExit() without raising it — a
            # no-op. Clear the flag so the worker loops notice and stop,
            # and restore the button label.
            self.isDowning = False
            self.btn['text'] = '开始下载'

    def get_request(self, url):
        """GET `url` with the scraper headers; return the Response.

        The timeout keeps a stalled connection from hanging the worker
        thread forever.
        """
        res = requests.get(url, headers=HEADERS, timeout=30)
        print("正在请求 " + url)
        return res

    def get_request_soup(self, url):
        """GET `url` and parse the response HTML into a BeautifulSoup tree."""
        html = self.get_request(url).text
        print("正在解析 ")
        return BeautifulSoup(html, 'lxml')

    def get_girl_list(self, url):
        """Walk the paginated gallery index starting at `url`.

        Iterative rather than recursive so a long site listing cannot hit
        the Python recursion limit; checks self.isDowning so the stop
        button actually halts the walk.
        """
        while url and self.isDowning:
            soup = self.get_request_soup(url)
            # The "next" link is absent on the last index page; the original
            # crashed with AttributeError there.
            next_link = soup.find(class_='next page-numbers')
            next_url = next_link.get('href') if next_link else None
            for girl in soup.find(class_='postlist').select('li'):
                if not self.isDowning:
                    return
                girl_url = girl.find('a').get('href')
                name = girl.find('span').find('a').string
                self.get_girl_group(girl_url, 1, name)
            url = next_url

    def get_girl_group(self, url, page, name):
        """Download image pages `page`..max_page of the gallery named `name`."""
        soup = self.get_request_soup(url + "/" + str(page))
        # The second-to-last <span> of the pager holds the highest page number.
        max_page = int(soup.find(class_='pagenavi').select('span')[-2].string)
        while self.isDowning:
            image = soup.find(class_='main-image').find('img').get('src')
            self.save_img(image, name, page)
            page += 1
            # The original recursion bound (page < max_page) skipped the
            # gallery's final page; > max_page includes it.
            if page > max_page:
                break
            soup = self.get_request_soup(url + "/" + str(page))

    # Save one gallery image to the local disk.
    def save_img(self, url, file_name, page):
        """Download `url` into SAVE_PATH/<file_name>/<page>.jpg, skipping
        images that already exist on disk."""
        file_path = os.path.join(SAVE_PATH, file_name)
        image_path = os.path.join(file_path, str(page) + ".jpg")
        print('检查文件是否存在 ' + image_path)
        if os.path.exists(image_path):
            print("文件已存在，无需重复下载" + image_path)
            return
        print("文件正在下载...")
        data = self.get_request(url).content
        print("下载成功")
        print("检查文件夹是否存在")
        if not os.path.exists(file_path):
            print("创建文件夹 " + file_name)
            os.makedirs(file_path)
        else:
            print("文件夹已存在 " + file_name)
        print("存入磁盘中...")
        # Context manager guarantees the handle is closed even if write fails.
        with open(image_path, 'wb') as f:
            f.write(data)
        print("存放完成")


# Script entry point: build the window and hand it to the scraper GUI.
# The guard prevents the GUI from launching on a mere import. `root` stays a
# module-level global (an `if` introduces no scope), which Mzitu's methods
# currently read directly.
if __name__ == "__main__":
    root = Tk()
    root.title('妹子图')
    root.geometry('300x300')
    Mzitu(root)
    root.mainloop()

# start_download()
