'''
使用多线程爬去斗图啦网站的最新表情前100页图片，并且下载到本地
'''
import os
import random
import re
import threading
import time
from queue import Empty, Queue
from urllib import request, error

import requests
from lxml import etree
from requests.exceptions import RequestException

# Pool of User-Agent strings; one is picked at random for each request
# to reduce the chance of the server blocking a repeated identical client.
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0"
]

# Pools of proxy addresses, likewise rotated at random to avoid an
# IP-based ban.
# NOTE(review): these are free public proxies and are very likely
# expired; requests through a dead proxy will simply fail.
proxy_https_list = [
    'https://111.79.45.239:9999',
    'https://123.149.141.23:9999',
    'https://221.229.252.98:8080',
    'https://218.60.8.83:3129',
    'https://114.99.5.162:9999',
]
proxy_http_list = [
    'http://49.77.209.51:9999',
    'http://120.83.111.97:9999',
    'http://122.228.19.92:3389',
    'http://112.95.205.49:8888'
]


class Procuder(threading.Thread):
    """Producer thread: pulls page URLs off ``page_queue``, downloads and
    parses each listing page, and pushes ``(image_url, filename)`` pairs
    onto ``img_queue`` for the consumers to download.

    NOTE(review): the class name is a misspelling of "Producer"; kept
    as-is because callers reference it by this name.
    """

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Procuder, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # Queue of listing-page URLs (input)
        self.img_queue = img_queue    # Queue of (img_url, filename) (output)

    def run(self):
        # Drain the page queue.  Using get_nowait() + Empty fixes the
        # check-then-get race of the original code: with several producer
        # threads, empty() could be False at the check yet the queue be
        # drained by a sibling before the blocking get(), hanging forever.
        while True:
            try:
                page_url = self.page_queue.get_nowait()
            except Empty:
                break
            html = self.get_response(page_url)
            if html is not None:
                self.parse_page(html)
            else:
                print('本页请求出错')

    def get_response(self, url):
        """Fetch *url* and return its HTML text, or None on any failure.

        A random User-Agent and proxy are chosen per request to reduce
        the chance of being blocked.
        """
        headers = {
            'User-Agent': random.choice(my_headers),
            'Host': 'www.doutula.com',
            'Connection': 'keep-alive'
        }
        proxies = {
            'http': random.choice(proxy_http_list),
            'https': random.choice(proxy_https_list)
        }
        try:
            # timeout added: without it a stalled proxy/connection would
            # hang this producer thread indefinitely.
            response = requests.get(url, headers=headers, proxies=proxies,
                                    timeout=10)
            print(response.status_code)
            if response.status_code == 200:
                return response.text
            return None
        except RequestException as e:
            print('请求网页出错')
            print(e)
            return None

    def parse_page(self, content):
        """Parse a listing page and enqueue every non-gif image found.

        Filenames are built from the image's ``alt`` text (punctuation
        stripped) plus the URL's original extension.
        """
        html = etree.HTML(content)
        img_tags = html.xpath(
            '//div[@class="page-content text-center"]//img[@class!="gif"]')
        for img_tag in img_tags:
            img_url = img_tag.get('data-original')
            alt = img_tag.get('alt')
            # Fix: lazy-loaded pages can contain <img> tags without
            # data-original or alt; the original code crashed with a
            # TypeError on None here.  Skip such tags instead.
            if not img_url or not alt:
                continue
            # Strip punctuation that is noisy (or invalid) in filenames.
            alt = re.sub(r'[\?？\.。,，!！;:；：\'\"\"‘“”\*]', '', alt)
            # Keep the remote file's extension (e.g. ".jpg", ".gif").
            suffix = os.path.splitext(img_url)[1]
            self.img_queue.put((img_url, alt + suffix))


class Consumer(threading.Thread):
    """Consumer thread: pulls ``(img_url, filename)`` pairs off
    ``img_queue`` and downloads each image into the ``images/`` folder.

    Exits once both queues are empty and no new item arrives within the
    grace period.
    """

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # watched only to decide when to stop
        self.img_queue = img_queue    # Queue of (img_url, filename) (input)

    def run(self):
        # Fix: the original code never created the target directory, so
        # every download failed with FileNotFoundError on a fresh run.
        os.makedirs('images', exist_ok=True)
        while True:
            if self.img_queue.empty() and self.page_queue.empty():
                break
            # Fix: the original blocking get() could hang forever when a
            # sibling consumer took the last item between the emptiness
            # check and the get().  A timeout bounds the wait; hitting it
            # loops back to re-evaluate the exit condition.
            try:
                img_url, filename = self.img_queue.get(timeout=5)
            except Empty:
                continue
            try:
                request.urlretrieve(img_url, 'images/' + filename)
                print(filename + '  下载完成！')
            except error.URLError as e:
                # URLError covers HTTPError (the only case the original
                # caught) plus DNS/connection failures that previously
                # crashed the thread.
                print(e)


# 主函数执行
def main():
    page_queue = Queue(100)
    img_queue = Queue(1000)
    start = time.time()
    url = 'http://www.doutula.com/photo/list/?page={}'
    # 加入100个页面的链接
    for i in range(1, 101):
        page_queue.put(url.format(i))

    # 创建5个生产者线程
    for x in range(5):
        t = Procuder(page_queue, img_queue)
        t.start()

    # 创建5个消费者
    for x in range(5):
        t = Consumer(page_queue, img_queue)
        t.start()

    end = time.time()
    print('一共用时', (end - start))


# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
