from requests.exceptions import RequestException
from lxml import etree
import requests
import re,time,json
import threading


def getList(url):
    """Fetch the ranking page at *url* and return the game detail hrefs.

    Returns the list of href strings found under the ranking list, or
    None when the request fails or the server does not answer 200.
    """
    try:
        # Identify as a desktop Firefox browser so the site serves the full
        # page. (The original value wrongly embedded the "User-Agent:" header
        # name inside the header value.)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
        }
        res = requests.get(url, headers=headers)
        print(res.status_code)
        # Check the status before spending time parsing the body.
        if res.status_code != 200:
            return None
        html = etree.HTML(res.text)
        # Each <li><a href=...> under the ranking list points at one game page.
        return html.xpath("//div/ul[@class='ranking-game ranking-list']/li/a/@href")
    except Exception as e:
        # Best-effort scraper: log and signal failure instead of crashing.
        print(e)
        return None


def getPage(url):
    """Scrape the ranking page at *url* and yield each game's raw detail page.

    Yields dicts with keys "content" (raw bytes of the detail page) and
    "game_id" (the id parsed out of the detail URL). Yields nothing on a
    non-200 response or on error.
    """
    try:
        # Plain browser UA string (original embedded the header name in the value).
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
        }
        res = requests.get(url, headers=headers)
        print(res.status_code)
        # Bail out before parsing if the server did not answer 200.
        if res.status_code != 200:
            return None
        html = etree.HTML(res.text)
        items = html.xpath("//div/ul[@class='ranking-game ranking-list']/li/a/@href")
        print(len(items))
        for href in items:
            # hrefs are protocol-relative ("//..."), so prepend the scheme.
            content = requests.get('https:' + href).content
            # e.g. ".../a/12345.htm" -> "12345". Raw string with the dot
            # escaped so ".htm" is matched literally.
            game_id = re.findall(r".*?/a/(.*?)\.htm", href)[0]
            print(game_id)
            yield {
                "content": content,
                "game_id": game_id,
            }
    except Exception as e:
        # Best-effort scraper: log and stop yielding instead of crashing.
        print(e)

def parsePage(items):
    """Fetch each game detail href in *items* and yield its parsed fields.

    Each yielded dict carries the game id plus the logo URL, score, featured
    comment, comment count, tag list and rank scraped from the detail page.
    """
    print(len(items))
    for href in items:
        # hrefs are protocol-relative ("//..."), so prepend the scheme.
        content = requests.get('https:' + href).content
        # Raw string with the dot escaped so ".htm" is matched literally.
        game_id = re.findall(r".*?/a/(.*?)\.htm", href)[0]

        html = etree.HTML(content)

        # NOTE(review): the [0] indexing assumes every element exists on the
        # detail page; a missing element raises IndexError — confirm the page
        # layout is stable or add guards.
        yield {
            'game_id': game_id,
            'game_logo': 'https:' + html.xpath("//div[@class='gameDesc']/img/@src")[0],
            'score': html.xpath("//p[@class='score']/text()")[0],
            'comment': html.xpath('//div[@class="txtCon"]/div/text()')[0],
            'comment_count': html.xpath("//div[@class='card']/p[@class='num']/text()")[0],
            'tag': html.xpath("//div[@class='des']/p[@class='tag']/a/text()"),
            'rank': html.xpath("//ul[@class='gameFrag']//a/span/text()")[0],
        }


def writeFile(content):
    """Append *content* to ./result.txt as a single JSON line (UTF-8)."""
    # ensure_ascii=False keeps non-ASCII text (e.g. Chinese) readable in the
    # output file instead of \uXXXX escapes.
    line = json.dumps(content, ensure_ascii=False)
    with open("./result.txt", 'a', encoding='utf-8') as out:
        out.write(line + "\n")

def main(urlList):
    """Drive the crawl: parse every entry in *urlList* and persist each record."""
    for record in parsePage(urlList):
        writeFile(record)

# Script entry point: fetch the ranking list, then scrape and persist each game.
if __name__ == '__main__':
    url = "https://www.3839.com/top/hot.html"
    urlList = getList(url)
    # BUG FIX: the original fetched the list but never invoked main(), so
    # nothing was ever scraped or written. Only proceed when getList succeeded
    # (it returns None on failure).
    if urlList:
        main(urlList)
