# -*- coding:utf-8 -*-
import requests
import threading
import re
import time


# Shared work queue of listing-page URLs, filled by Spider and
# drained by the Producer threads.
all_urls = []


class Spider():
    """Builds the queue of listing-page URLs to crawl.

    target_url is a ``%d`` format template (e.g. '.../comment-page-%d/');
    headers is kept for callers that want to reuse the request headers.
    """

    def __init__(self, target_url, headers):
        self.target_url = target_url
        self.headers = headers

    def getUrls(self, start_page, page_num):
        """Append page URLs for pages [start_page, page_num) to ``all_urls``."""
        global all_urls
        all_urls.extend(self.target_url % page for page in range(start_page, page_num))


class Producer(threading.Thread):
    """Worker thread: pops listing-page URLs off ``all_urls`` and scrapes
    image URLs (``data-original`` attributes) into ``all_img_urls``.

    Several Producer instances run concurrently; all shared-list access is
    serialized with the module-level ``g_lock``.
    """

    def run(self):
        print("Producer Start")
        headers = {
            "Host": "www.newsimg.cn",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537..36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5383.400 QQBrowser/10.0.1313.400"
        }
        global all_urls, all_img_urls
        while True:
            # Check-and-pop must be atomic: with two producers, a len()
            # test outside the lock can race and pop from an empty list.
            with g_lock:
                if not all_urls:
                    break
                page_url = all_urls.pop()
            try:
                print("Analyzing " + page_url)
                response = requests.get(page_url, headers=headers, timeout=3)
                all_pic_link = re.findall('data-original="(.*?)"', response.text, re.S)
                with g_lock:
                    all_img_urls += all_pic_link
                    print(all_img_urls)
                time.sleep(0.5)  # be polite to the server between page fetches
            except requests.RequestException:
                # Narrowed from a bare except: only network/HTTP failures are
                # expected here; anything else should surface, not be hidden.
                print("Exception!!")


#class Consumer(threading.Thread):
#    def run(self):
#        print("Producer Start")
#        headers = {
#            "Host": "www.newsimg.cn",
#            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537..36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5383.400 QQBrowser/10.0.1313.400"
#        }
#        global all_img_urls
#        print("%s is running " % threading.current_thread)
#        while len(all_img_urls) > 0:
#            g_lock.acquire()
#            img_url = all_img_urls.pop()
#            g_lock.release()
#            try:
#                response = requests.get(img_url, headers=headers)
#                response.encoding='gb2312'


class DownPic(threading.Thread):
    """Worker thread: pops image URLs off ``all_img_urls`` and saves each
    image to disk exactly once.

    Fixes over the original: each URL is fetched a single time (not 10x),
    the filename is derived from the URL so concurrent threads and
    successive images do not overwrite each other's ``0.jpg``..``9.jpg``,
    the check-and-pop is done under ``g_lock``, and the loop terminates
    when the queue is drained instead of spinning forever.
    """

    def run(self):
        print("DownPic Start")
        headers = {
            "Host": "www.newsimg.cn",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537..36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5383.400 QQBrowser/10.0.1313.400"
        }
        global all_img_urls
        print("%s is running " % threading.current_thread)
        while True:
            with g_lock:
                if not all_img_urls:
                    break  # queue drained; let the thread finish
                img_url = all_img_urls.pop()
            # Use the URL's basename as the file name; fall back to a
            # hash-based name if the URL ends with a slash.
            filename = img_url.rsplit('/', 1)[-1] or ('%d.jpg' % (hash(img_url) & 0xFFFFFFFF))
            print(f"Debug: current pic website is {img_url}")
            try:
                response = requests.get(img_url, headers=headers, timeout=3)
            except requests.RequestException:
                print("Exception!!")
                continue  # skip this image; don't kill the whole thread
            # 'with' closes the file; the old trailing 'f.close' (no parens)
            # was a no-op attribute access.
            with open(filename, 'wb') as f:
                f.write(response.content)


def run():
    """Seed the crawl and smoke-test connectivity.

    Side effects: fills the module-level ``all_urls`` queue via
    ``Spider.getUrls`` (pages 1..5 — the upper bound of range is
    exclusive) and writes one sample download to ``a.jpg`` in the
    working directory.
    """
    print("Spider running!")
    headers = {
        "Host": "www.newsimg.cn",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537..36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5383.400 QQBrowser/10.0.1313.400"
    }
    # %d placeholder is filled with the page number by Spider.getUrls.
    target_url = 'https://www.mzitu.com/zipai/comment-page-%d/'

    spider = Spider(target_url, headers)
    spider.getUrls(1, 6)
    print(all_urls)

    # One-off check that the image host is reachable and saves correctly.
    response = requests.get("http://www.newsimg.cn/big201710leaderreports/xibdj20171030.jpg", headers=headers)
    with open("a.jpg", "wb") as f:
        # 'with' closes the file; the old trailing 'f.close' (no parens)
        # was a no-op attribute access and has been removed.
        f.write(response.content)
    print(response.text)


if __name__ == "__main__":
    # Shared state used by the worker threads.
    all_img_urls = []          # image URLs harvested by Producer threads
    threads = []               # Producer thread handles
    g_lock = threading.Lock()  # guards all_urls and all_img_urls

    run()

    # Phase 1: harvest image URLs from the listing pages.
    for x in range(2):
        t = Producer()
        t.start()
        threads.append(t)

    for tt in threads:
        tt.join()

    # Phase 2: download the harvested images. Join the download threads so
    # "Done!" is only printed once every download has actually finished
    # (previously they were fired and forgotten).
    downloaders = []
    for x in range(2):
        down = DownPic()
        down.start()
        downloaders.append(down)

    for down in downloaders:
        down.join()

    print("Done!")