# -*- coding:utf-8 -*-

"""
新浪图片爬取
"""

__author__ = 'LheZi'

import os
import re
import threading
import time
from queue import Empty, Queue
from urllib import error, request

import requests
from lxml import etree
from simplejson import errors

# Current result-page number; shared by all GetPageUrl producer threads.
# Set to -1 once the last page is reached, which signals every thread to stop.
PAGE = 0
# Total number of image URLs discovered so far.
URLCOUNT = 0
# Total number of images downloaded so far.
COUNT = 0
# Guards the shared counters above across producer/consumer threads.
gLock = threading.Lock()


class GetPageUrl(threading.Thread):
    """Producer thread: pages through Sina image-search results for keyword
    ``q`` and puts ``(img_url, filename)`` tuples onto ``img_queue``.

    Shares the module-level ``PAGE`` / ``URLCOUNT`` counters with sibling
    threads under ``gLock``; sets ``PAGE = -1`` when the result pages are
    exhausted so every producer stops.
    """

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        'Host': 'search.sina.com.cn',
    }
    # format=api_html: the endpoint returns JSON whose 'data' field is an HTML fragment.
    url = "http://search.sina.com.cn/?c=img&ie=gbk&ps=&pf=&format=api_html&num=10&sort=rel"

    def __init__(self, img_queue, q, *args, **kwargs):
        super(GetPageUrl, self).__init__(*args, **kwargs)
        self.img_queue = img_queue  # shared queue consumed by Download threads
        self.q = q                  # search keyword

    def run(self):
        global PAGE, URLCOUNT
        while True:
            gLock.acquire()
            if PAGE < 0:  # another producer already hit the last page
                gLock.release()
                break
            PAGE += 1
            params = {
                'q': self.q,
                'page': PAGE,
            }

            response = requests.get(self.url, params=params, headers=self.headers)
            time.sleep(0.5)  # throttle requests to the server

            try:
                data = response.json()['data']
            except (ValueError, KeyError):
                # Bug fix: the original caught only simplejson's JSONDecodeError.
                # requests falls back to the stdlib json module, whose
                # JSONDecodeError is a ValueError subclass and would have
                # escaped, killing the thread. KeyError covers a well-formed
                # response that lacks the 'data' field.
                print('尝试重新获取第%s页...' % PAGE)
                time.sleep(10)
                PAGE -= 1  # retry the same page
                gLock.release()
                continue

            if data:
                html = etree.HTML(data)
                containers = html.xpath('//div[@class="cell"]')
                print('#' * 20, PAGE)
                for index, container in enumerate(containers):
                    img_url = container.xpath('.//img/@src')[0]
                    titles = container.xpath('.//p[1]/text()')
                    ext = os.path.splitext(img_url)[1]
                    title = ''.join(t.strip(' ') for t in titles)
                    # Strip characters that are illegal or awkward in filenames.
                    title = re.sub(r'["<>,，。\s/\|？\?\.\*]', '', title)
                    filename = '%s(%d)%s' % (title, index, ext)
                    self.img_queue.put((img_url, filename))
                    URLCOUNT += 1
                # Bug fix: release only after updating URLCOUNT — the original
                # released before the loop and mutated the shared counter
                # without holding the lock.
                gLock.release()
            else:
                print('页面读取完毕', '*' * 10, PAGE)
                PAGE = -1  # signal all producers to stop
                gLock.release()
                break


class Download(threading.Thread):
    """Consumer thread: takes ``(img_url, filename)`` pairs off ``img_queue``
    and saves each image into the directory named after the keyword ``q``.

    Exits once the queue is drained and the producers have signalled the end
    of the result pages (``PAGE < 0``).
    """

    def __init__(self, img_queue, q, *args, **kwargs):
        super(Download, self).__init__(*args, **kwargs)
        self.img_queue = img_queue  # shared queue filled by GetPageUrl threads
        self.q = q                  # keyword; doubles as the output directory

    def run(self):
        global PAGE, COUNT
        while True:
            if self.img_queue.empty() and PAGE < 0:
                print('*' * 10, threading.current_thread().name, '下载结束', '*' * 10)
                print(URLCOUNT, COUNT)
                break
            try:
                # Bug fix: a bounded wait instead of a blocking get(). The
                # original could deadlock forever if producers finished
                # between the empty() check and the get(); now we time out
                # and re-check the exit condition.
                img_url, filename = self.img_queue.get(timeout=3)
            except Empty:
                continue
            try:
                request.urlretrieve(img_url, '%s/%s' % (self.q, filename))
                time.sleep(0.5)  # throttle downloads
                # Bug fix: COUNT is shared across downloader threads; the
                # original incremented it without holding the lock.
                with gLock:
                    COUNT += 1
                print(filename, '下载完成')
            except (error.ContentTooShortError, error.HTTPError) as e:
                print('%s 下载未完成: %s' % (filename, e))


def main(q):
    """Create the output directory for keyword *q*, then launch 5 producer
    (page-scraping) threads and 4 consumer (download) threads sharing one
    unbounded queue."""
    if not os.path.exists(q):
        os.mkdir(q)
    img_queue = Queue(0)
    producers = [GetPageUrl(img_queue, q) for _ in range(5)]
    consumers = [Download(img_queue, q) for _ in range(4)]
    for worker in producers + consumers:
        worker.start()


if __name__ == '__main__':
    # Prompt (Chinese): "Enter the keyword you want to crawl:"
    main(input('输入你想要爬取的关键词:'))