# -*- coding: utf-8 -*-
import urllib.request
import hashlib
import re
from threading import Thread,Lock
from queue import Queue
import time
import os
 

def download_jpg(url):
    '''Download one image and store it as aiyouwu/<md5>.jpg.

    Some sites block hot-linking, so we send browser-like headers.
    Referer is usually fine as None; if the site still refuses, set it
    to the site's front page.
    '''
    req_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
              'Accept':'text/html;q=0.9,*/*;q=0.8',
              'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
              # Ask for an uncompressed body: the raw bytes are written to
              # disk as-is, so a gzip-encoded response would produce a
              # corrupt .jpg file.
              'Accept-Encoding':'identity',
              'Connection':'close',
              'Referer':'https://www.tuli.cc/' # set to the target site's host, e.g. 'http://www.tianya.cn/'
              }
    # Bug fix: the original built req_header but never sent it, so the
    # anti-hotlink headers had no effect.
    req = urllib.request.Request(url, headers=req_header)
    with urllib.request.urlopen(req) as resp:  # close the response deterministically
        content = resp.read()

    if len(content) < 100000: # skip images that are too small
        return
    md5 = hashlib.md5(content).hexdigest()
    os.makedirs('aiyouwu', exist_ok=True)  # target dir may not exist yet
    with open('aiyouwu/'+md5+'.jpg','wb') as f:
        f.write(content)
  
  
class Fetcher:
    '''Multi-threaded downloader: push URLs, daemon workers fetch them.'''
    def __init__(self,threads):
        self.opener = urllib.request.build_opener(urllib.request.HTTPHandler)
        self.lock = Lock() # guards self.running
        self.q_req = Queue() # pending URLs
        self.q_ans = Queue() # finished URLs
        self.threads = threads
        # Bug fix: self.running must exist before workers start, since
        # threadget() touches it; the original assigned it after start().
        self.running = 0
        for i in range(threads):
            t = Thread(target=self.threadget)
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()

    def __del__(self): # wait for both queues to drain on teardown
        # NOTE(review): join() inside __del__ can hang at interpreter
        # shutdown; kept for compatibility with the original design.
        time.sleep(0.5)
        self.q_req.join()
        self.q_ans.join()

    def task_left(self):
        '''Number of tasks not yet fully processed (queued + in flight).'''
        return self.q_req.qsize()+self.q_ans.qsize()+self.running

    def push(self,req):
        '''Queue one URL for download.'''
        self.q_req.put(req)

    def pop(self):
        '''Block until a finished URL is available and return it.'''
        return self.q_ans.get()

    def threadget(self):
        '''Worker loop: take a URL, download it, report completion.'''
        while True:
            req = self.q_req.get()
            with self.lock: # counter update must be atomic (critical section)
                self.running += 1
            try:
                download_jpg(req)
            finally:
                # Bug fix: if download_jpg raised, the original never ran
                # q_ans.put()/running -= 1/task_done(), so task_left()
                # stayed positive and the consumer deadlocked in pop().
                self.q_ans.put(req)
                with self.lock:
                    self.running -= 1
                self.q_req.task_done()
            time.sleep(0.1) # don't spam the server
 
 
def grab_jpg(url):
    '''Fetch a page and return every .jpg URL found in src="..." attributes.

    Returns an empty list when the page cannot be retrieved.
    '''
    try:
        with urllib.request.urlopen(url) as resp:
            content = resp.read()
    except (ValueError, OSError):  # bad URL / network / HTTP error (URLError is an OSError)
        #print('open failed')
        return []
    # Bug fix: the original ran the regex over str(content) -- the "b'...'"
    # repr of a bytes object -- instead of the decoded document.
    text = content.decode('utf-8', errors='replace')
    pattern = re.compile(r'src="([^>]+?\.jpg)"') # non-greedy so each src matches separately
    return pattern.findall(text)
        
        
def grab_url_id_page(id):
    '''Walk the pages of one gallery id and download every .jpg they link.

    Stops at the first page that yields no images (past the end, or the
    fetch failed), then waits for all queued downloads to finish.
    '''
    f = Fetcher(threads=2)
    for page in range(2,200):
        url = 'https://www.tuli.cc/AIYOUWU/2020/0225/%d_%d.html'%(id,page)
        img_urls = grab_jpg(url)
        if not img_urls:
            print(id,page)
            break
        # Fix: the original reused `page` as the inner loop variable,
        # shadowing the page counter -- confusing even if harmless here.
        for img_url in img_urls:
            f.push(img_url)
    # Drain the completion queue so every download finishes before returning.
    while f.task_left():
        f.pop()
        

if __name__ == '__main__':
    # Bug fix: a bare `raise` (debug leftover) crashed here with
    # "RuntimeError: No active exception to re-raise", making the loop
    # below unreachable.
    grab_url_id_page(7199)
    for gallery_id in range(7280,7351): # previous batch: range(7128,7200)
        grab_url_id_page(gallery_id)