'''
Multi-threaded crawler for the top-100 game ranking on haoyoukuaibao (3839.com).
'''
import threading  # 多线程模块
import queue  # 队列模块
import requests
from lxml import etree
import re, random, time, json

concurrent = 3  # number of fetcher (Crawl) threads
conparse = 3  # number of parser (Parse) threads

class Crawl(threading.Thread):  # fetcher thread class
    """Fetcher thread: pulls detail-page URLs from the request queue,
    downloads each page and pushes the raw HTML plus the game id onto
    the data queue for the parser threads."""

    def __init__(self, number, req_list, data_list):
        """
        :param number: thread number, used only in log output
        :param req_list: queue.Queue of detail-page URLs to fetch
        :param data_list: queue.Queue that fetched pages are pushed onto
        """
        # Initialise the Thread base class first.
        super(Crawl, self).__init__()
        self.number = number
        self.req_list = req_list
        self.data_list = data_list
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'}

    def run(self):
        """Drain the request queue, fetching one URL per iteration."""
        print('启动采集线程%d号' % self.number)
        # Use get_nowait() instead of a qsize()>0 check followed by a
        # blocking get(): with several crawler threads another thread can
        # empty the queue between the size check and the get, which would
        # leave this thread blocked forever.
        while True:
            try:
                url = self.req_list.get_nowait()
            except queue.Empty:
                break
            print('%d号线程采集：%s' % (self.number, url))
            # Random delay so we don't hit the server too fast.
            time.sleep(random.randint(1, 3))
            # Fetch the page; push the raw body onto the data queue on success.
            response = requests.get(url, headers=self.headers)
            if response.status_code == 200:
                # Extract the game id from a URL shaped like .../a/<id>.htm.
                # Raw pattern with the dot escaped; guard against no match
                # instead of letting IndexError kill the thread.
                match = re.search(r"/a/(.*?)\.htm", url)
                game_id = match.group(1) if match else ''
                self.data_list.put({
                    "content": response.content,
                    "game_id": game_id,
                })
            else:
                print('%d号线程采集：%s失败' % (self.number, url))


class Parse(threading.Thread):  # parser thread class
    """Parser thread: consumes fetched pages from the data queue, extracts
    the game fields with XPath and appends one JSON line per game to the
    shared output file."""

    def __init__(self, number, data_list, req_thread, f):
        """
        :param number: thread number, used only in log output
        :param data_list: queue.Queue of {'content', 'game_id'} dicts
        :param req_thread: list of Crawl threads, polled to detect when fetching is done
        :param f: open text file the JSON lines are written to
        """
        super(Parse, self).__init__()
        self.number = number
        self.data_list = data_list
        self.req_thread = req_thread
        self.f = f
        # Flips to False once every crawler is dead and the queue is empty.
        self.is_parse = True

    def run(self):
        print('启动%d号解析线程' % self.number)
        while True:
            # Stop condition: every crawler thread has finished AND the data
            # queue is empty. The for/else runs the else branch only when no
            # crawler is still alive (i.e. the loop completed without break).
            for t in self.req_thread:
                if t.is_alive():
                    break
            else:
                if self.data_list.qsize() == 0:
                    self.is_parse = False
            if self.is_parse:
                try:
                    # Short timeout so the stop condition is re-checked
                    # even while crawlers are slow to produce data.
                    data = self.data_list.get(timeout=3)
                except queue.Empty:  # precise exception instead of bare Exception
                    data = None
                if data is not None:
                    self.parse(data)
            else:
                break
        print('退出%d号解析线程' % self.number)

    def parse(self, data):
        """Extract game fields from one fetched page and write a JSON line."""
        content = data["content"]
        game_id = data["game_id"]
        html = etree.HTML(content)
        try:
            item = {
                'game_id': game_id,
                'game_logo': 'https:' + html.xpath("//div[@class='gameDesc']/img/@src")[0],
                'score': html.xpath("//p[@class='score']/text()")[0],
                'comment': html.xpath('//div[@class="txtCon"]/div/text()')[0],
                'comment_count': html.xpath("//div[@class='card']/p[@class='num']/text()")[0],
                'tag': html.xpath("//div[@class='des']/p[@class='tag']/a/text()"),
                'rank': html.xpath("//ul[@class='gameFrag']//a/span/text()")[0]
            }
        except IndexError:
            # Page layout didn't match the expected selectors; skip this page
            # instead of letting the exception kill the parser thread.
            print('%d号解析线程解析%s失败' % (self.number, game_id))
            return
        self.f.write(json.dumps(item, ensure_ascii=False) + '\n')

def getList(url):
    """Fetch the ranking page and return the list of relative detail-page
    URLs (href attribute values), or None on a non-200 response or error.

    :param url: URL of the ranking list page
    """
    try:
        headers = {
            # NOTE(review): the original value embedded a duplicated,
            # whitespace-mangled 'User-Agent:' prefix inside the header
            # value itself; send only the product string.
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
        }
        res = requests.get(url, headers=headers)
        # Check the status BEFORE parsing — the original parsed the body
        # unconditionally and only then looked at the status code.
        if res.status_code != 200:
            return None
        html = etree.HTML(res.text)
        return html.xpath("//div/ul[@class='ranking-game ranking-list']/li/a/@href")
    except Exception as e:
        print(e)
        return None  # explicit: callers must handle the failure case

def main():
    """Build the URL queue, start the crawler and parser threads, wait for
    both groups to finish, and write the results to result.json."""
    req_list = queue.Queue()   # URLs waiting to be fetched
    data_list = queue.Queue()  # fetched pages waiting to be parsed
    base_url = 'https://www.3839.com/top/hot.html'
    url_list = getList(base_url)
    if not url_list:
        # getList returns None on failure; without this guard the loop
        # below would raise TypeError on None.
        print('failed to fetch the ranking list page: %s' % base_url)
        return
    for u in url_list:
        # hrefs on the page are protocol-relative (//www.3839.com/...).
        req_list.put("https:" + u)

    # The output file must stay open until every parser thread has finished
    # writing, so the joins happen inside the with-block. The original
    # opened the file before the fetch and leaked it on early failure.
    with open('result.json', 'w', encoding='utf-8') as f:
        req_thread = []
        for i in range(concurrent):
            t = Crawl(i + 1, req_list, data_list)
            t.start()
            req_thread.append(t)

        parse_thread = []
        for j in range(conparse):
            # Use a distinct name for the thread object instead of
            # rebinding the loop index j.
            p = Parse(j + 1, data_list, req_thread, f)
            p.start()
            parse_thread.append(p)

        # Wait for the crawlers first, then the parsers.
        for t in req_thread:
            t.join()
        for t in parse_thread:
            t.join()


if __name__ == "__main__":
    main()