#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author：albert time:2019/7/23
import random
import re
import threading
import time

import requests

'''UserAgent做一个简单的反爬处理'''
class Config(object):
    '''Supplies randomized request headers as a simple anti-scraping
    countermeasure (rotating User-Agent).'''

    def getHeaders(self):
        '''Return a `requests`-compatible headers dict whose User-Agent is
        chosen at random from a pool of real browser UA strings.

        Returns:
            dict: ``{'User-Agent': <ua string>}``
        '''
        # BUG FIX: the original list was missing a comma after the first
        # entry, so the first two strings were implicitly concatenated
        # into a single malformed User-Agent.
        user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        ]
        headers = {'User-Agent': random.choice(user_agent_list)}
        return headers
# Shared work queue of listing-page URLs; producer threads pop from it and
# push newly discovered pagination URLs back onto it.
urls = ['http://www.moko.cc/subscribe/chenhaoalex/1.html']
index = 0 # running id assigned to each discovered URL (guarded by g_lock)
g_lock = threading.Lock()  # guards both `urls` and `index` across threads

class Producer(threading.Thread):
    '''Producer thread: repeatedly pops a page URL from the shared `urls`
    list, downloads it, and — when the URL is page 1 of a listing — parses
    the pager widget to discover and enqueue the remaining pages.

    Shares `urls` and `index` with sibling threads; all access to either
    is serialized through the module-level `g_lock`.
    '''

    def run(self):
        print('线程启动。。。。。')
        # BUG FIX: the original called Config.getHeaders(self), passing a
        # Producer instance as another class's `self`; instantiate Config.
        headers = Config().getHeaders()
        global urls
        global index
        while True:
            g_lock.acquire()  # `urls` is shared by all producer threads
            if len(urls) == 0:
                g_lock.release()
                # Queue is momentarily empty but a sibling thread may still
                # push more URLs; back off briefly instead of busy-spinning
                # at 100% CPU (the original `continue`d immediately).
                time.sleep(0.1)
                continue
            page_url = urls.pop()
            g_lock.release()
            try:
                response = requests.get(page_url, headers=headers, timeout=5)
            except Exception as http:
                print("生产者异常")
                print(http)
                continue
            # Page source of the fetched listing.
            content = response.text
            # Extract the current page number from ".../N.html".
            # BUG FIX: guard against URLs without a page number — the
            # original `.group(1)` raised AttributeError on no match and
            # killed the thread.
            match = re.search(r'(\d+)\.html', page_url)
            if match is None:
                continue
            # Only page 1 carries the pager widget scraped below.
            if match.group(1) == '1':
                '''Determine the total number of pages.'''
                pages = re.findall(r'onfocus=\"this\.blur\(\)\">(\d*?)<', content, re.S)
                # BUG FIX: the original used int(max(pages)) which compares
                # the page numbers as *strings* ('9' > '10'); compare as
                # integers, skipping any empty captures from the lazy \d*?.
                page_size = max((int(p) for p in pages if p.isdigit()), default=1)
                if page_size > 1:
                    url_arr = []
                    threading_links_1 = []
                    for page in range(2, page_size + 1):
                        url = re.sub(r'(\d*?)\.html', str(page) + ".html", page_url)
                        threading_links_1.append(url)
                        # BUG FIX: capture the counter value inside the
                        # lock — the original read `index` after releasing
                        # it, so concurrent producers could interleave.
                        g_lock.acquire()
                        index += 1
                        current_index = index
                        g_lock.release()
                        url_arr.append({"index": current_index, "link": url})
                    g_lock.acquire()
                    urls += threading_links_1  # enqueue newly found pages
                    g_lock.release()
                    print(url_arr)
            # '''Find each followed user's name''' (disabled follower pass)
            # rc = re.compile(r'<a class=\"imgBorder\" href=\"\/(.*?)" hidefocus=\"true\">')
            # follows = rc.findall(content)
            # # print(follows)
            # fo_url = []
            # threading_links_2 = []
            # for u in follows:
            #     this_url = 'http://www.moko.cc/subscribe/%s/1.html' %u
            #     g_lock.acquire()
            #     index += 1
            #     g_lock.release()
            #     fo_url.append({'index':index,'link':this_url})
            #     threading_links_2.append(this_url)
            # g_lock.acquire()
            # urls += threading_links_2
            # g_lock.release()
            # print(fo_url)

if __name__ == '__main__':
    # Launch five producer threads to crawl the shared URL queue
    # concurrently.
    workers = [Producer() for _ in range(5)]
    for worker in workers:
        worker.start()


