# Author    : CoffeeChicken
# Date      : 2020-10-28 19:34
# Function  : Demo — multi-threaded image crawling


import requests
from lxml import etree
import os
from threading import Thread

# Template for list-page URLs; `num` is substituted with the page number.
url_front = "https://023vcc.com/?page={num}"

# Desktop-browser User-Agent, presumably to avoid the site rejecting the
# default python-requests UA — TODO confirm the site actually requires it.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/85.0.4183.121 Safari/537.36"
}


def spider_img1(start=2, end=50):
    """Crawl list pages [start, end) and download every gallery image.

    For each list page, collect the detail-page links; for each detail
    page, save all of its images into a directory named after the page
    title (one numbered .jpg per image).

    :param start: first list-page number, inclusive (default 2, original range)
    :param end: one past the last list-page number (default 50)
    """
    for page in range(start, end):
        # List-page URL.
        url = url_front.format(num=page)
        try:
            # BUGFIX: headers must be passed by keyword — the second
            # positional parameter of requests.get is `params`, so the
            # original sent the UA dict as query-string parameters
            # instead of HTTP headers.
            response_front = requests.get(url, headers=headers, timeout=5).text
            tree = etree.HTML(response_front)
            # Collect all detail-page URLs on this list page.
            link_list = tree.xpath("//article[@class='block--list']/a/@href")
        except requests.exceptions.RequestException:
            # Network / timeout problem on a list page: skip to the next one.
            continue
        for link in link_list:
            try:
                # Fetch the detail page and extract its image URLs + title.
                response_page = requests.get(link, headers=headers, timeout=5).text
                tree1 = etree.HTML(response_page)
                img_links = tree1.xpath("//div[@class='entry-content grap']/img/@src")
                title = tree1.xpath("//title/text()")[0]
                # Titles may contain path separators; neutralize them so the
                # directory cannot escape the working directory or fail.
                title = title.replace("/", "_").replace("\\", "_").strip()
                # Race-free replacement for exists() + mkdir().
                os.makedirs(title, exist_ok=True)
                for index, img_url in enumerate(img_links, start=1):
                    try:
                        response_pic = requests.get(img_url, headers=headers, timeout=5)
                        with open(os.path.join(title, str(index) + ".jpg"), "wb") as fp:
                            fp.write(response_pic.content)
                        print("======>", title, index, "<=====", "is ok")
                    except requests.exceptions.RequestException:
                        # One broken image should not abort the whole page.
                        continue
            except (requests.exceptions.RequestException, IndexError, OSError):
                # IndexError: detail page without a <title>;
                # OSError: unusable directory name on this filesystem.
                continue

def spider_img2(start=50, end=100):
    """Crawl list pages [start, end) and download every gallery image.

    Same logic as spider_img1 but covering the second page range.

    :param start: first list-page number, inclusive (default 50, original range)
    :param end: one past the last list-page number (default 100)
    """
    for page in range(start, end):
        # List-page URL.
        url = url_front.format(num=page)
        try:
            # BUGFIX: headers must be passed by keyword — the second
            # positional parameter of requests.get is `params`, so the
            # original sent the UA dict as query-string parameters
            # instead of HTTP headers.
            response_front = requests.get(url, headers=headers, timeout=5).text
            tree = etree.HTML(response_front)
            # Collect all detail-page URLs on this list page.
            link_list = tree.xpath("//article[@class='block--list']/a/@href")
        except requests.exceptions.RequestException:
            # Network / timeout problem on a list page: skip to the next one.
            continue
        for link in link_list:
            try:
                # Fetch the detail page and extract its image URLs + title.
                response_page = requests.get(link, headers=headers, timeout=5).text
                tree1 = etree.HTML(response_page)
                img_links = tree1.xpath("//div[@class='entry-content grap']/img/@src")
                title = tree1.xpath("//title/text()")[0]
                # Titles may contain path separators; neutralize them so the
                # directory cannot escape the working directory or fail.
                title = title.replace("/", "_").replace("\\", "_").strip()
                # Race-free replacement for exists() + mkdir().
                os.makedirs(title, exist_ok=True)
                for index, img_url in enumerate(img_links, start=1):
                    try:
                        response_pic = requests.get(img_url, headers=headers, timeout=5)
                        with open(os.path.join(title, str(index) + ".jpg"), "wb") as fp:
                            fp.write(response_pic.content)
                        print("======>", title, index, "<=====", "is ok")
                    except requests.exceptions.RequestException:
                        # One broken image should not abort the whole page.
                        continue
            except (requests.exceptions.RequestException, IndexError, OSError):
                # IndexError: detail page without a <title>;
                # OSError: unusable directory name on this filesystem.
                continue

def spider_img3(start=100, end=150):
    """Crawl list pages [start, end) and download every gallery image.

    Same logic as spider_img1 but covering the third page range.

    :param start: first list-page number, inclusive (default 100, original range)
    :param end: one past the last list-page number (default 150)
    """
    for page in range(start, end):
        # List-page URL.
        url = url_front.format(num=page)
        try:
            # BUGFIX: headers must be passed by keyword — the second
            # positional parameter of requests.get is `params`, so the
            # original sent the UA dict as query-string parameters
            # instead of HTTP headers.
            response_front = requests.get(url, headers=headers, timeout=5).text
            tree = etree.HTML(response_front)
            # Collect all detail-page URLs on this list page.
            link_list = tree.xpath("//article[@class='block--list']/a/@href")
        except requests.exceptions.RequestException:
            # Network / timeout problem on a list page: skip to the next one.
            continue
        for link in link_list:
            try:
                # Fetch the detail page and extract its image URLs + title.
                response_page = requests.get(link, headers=headers, timeout=5).text
                tree1 = etree.HTML(response_page)
                img_links = tree1.xpath("//div[@class='entry-content grap']/img/@src")
                title = tree1.xpath("//title/text()")[0]
                # Titles may contain path separators; neutralize them so the
                # directory cannot escape the working directory or fail.
                title = title.replace("/", "_").replace("\\", "_").strip()
                # Race-free replacement for exists() + mkdir().
                os.makedirs(title, exist_ok=True)
                for index, img_url in enumerate(img_links, start=1):
                    try:
                        response_pic = requests.get(img_url, headers=headers, timeout=5)
                        with open(os.path.join(title, str(index) + ".jpg"), "wb") as fp:
                            fp.write(response_pic.content)
                        print("======>", title, index, "<=====", "is ok")
                    except requests.exceptions.RequestException:
                        # One broken image should not abort the whole page.
                        continue
            except (requests.exceptions.RequestException, IndexError, OSError):
                # IndexError: detail page without a <title>;
                # OSError: unusable directory name on this filesystem.
                continue

def spider_img4(start=150, end=200):
    """Crawl list pages [start, end) and download every gallery image.

    Same logic as spider_img1 but covering the fourth page range.

    :param start: first list-page number, inclusive (default 150, original range)
    :param end: one past the last list-page number (default 200)
    """
    for page in range(start, end):
        # List-page URL.
        url = url_front.format(num=page)
        try:
            # BUGFIX: headers must be passed by keyword — the second
            # positional parameter of requests.get is `params`, so the
            # original sent the UA dict as query-string parameters
            # instead of HTTP headers.
            response_front = requests.get(url, headers=headers, timeout=5).text
            tree = etree.HTML(response_front)
            # Collect all detail-page URLs on this list page.
            link_list = tree.xpath("//article[@class='block--list']/a/@href")
        except requests.exceptions.RequestException:
            # Network / timeout problem on a list page: skip to the next one.
            continue
        for link in link_list:
            try:
                # Fetch the detail page and extract its image URLs + title.
                response_page = requests.get(link, headers=headers, timeout=5).text
                tree1 = etree.HTML(response_page)
                img_links = tree1.xpath("//div[@class='entry-content grap']/img/@src")
                title = tree1.xpath("//title/text()")[0]
                # Titles may contain path separators; neutralize them so the
                # directory cannot escape the working directory or fail.
                title = title.replace("/", "_").replace("\\", "_").strip()
                # Race-free replacement for exists() + mkdir().
                os.makedirs(title, exist_ok=True)
                for index, img_url in enumerate(img_links, start=1):
                    try:
                        response_pic = requests.get(img_url, headers=headers, timeout=5)
                        with open(os.path.join(title, str(index) + ".jpg"), "wb") as fp:
                            fp.write(response_pic.content)
                        print("======>", title, index, "<=====", "is ok")
                    except requests.exceptions.RequestException:
                        # One broken image should not abort the whole page.
                        continue
            except (requests.exceptions.RequestException, IndexError, OSError):
                # IndexError: detail page without a <title>;
                # OSError: unusable directory name on this filesystem.
                continue

if __name__ == '__main__':
    # Run the four page-range workers concurrently. The work is I/O-bound
    # (network downloads), so threads overlap the waits despite the GIL.
    workers = [
        Thread(target=spider_img1),
        Thread(target=spider_img2),
        Thread(target=spider_img3),
        Thread(target=spider_img4),
    ]
    for worker in workers:
        worker.start()
    # Join so the script only finishes once every worker has completed.
    for worker in workers:
        worker.join()
