import requests
import urllib.request
from bs4 import BeautifulSoup   #对网页进行提取重要内容
from one_game_detail_info import *
import json

#soup = soup.find_all('a')
'''
for item in soup:
    gameType.append(item.get_text())

#输出打印所有的游戏类型
print(gameType)
'''
#请求网页页面内容函数
def web_content(url):
    """Fetch *url* and return the page parsed as a BeautifulSoup document.

    A desktop-browser User-Agent header is added (the site rejects the
    default urllib agent), and the body is decoded as GBK — the site is
    served in that encoding, not UTF-8 (the "gotcha" the original comment
    warned about).
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36')
    # Context manager ensures the HTTP response is closed even if
    # read()/decode() raises — the original leaked the connection.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('gbk')
    return BeautifulSoup(html, features='lxml')
#爬取游戏标签
def spider_action(soup):
    """Return the label of the game category shown on *soup*.

    The label is the text of the second <span class="tit"> element on
    the page (the first matching span holds something else).
    """
    tit_spans = soup.find_all('span', {'class': 'tit'})
    # Index 1 — the second match — carries the category name.
    return tit_spans[1].text
#爬取所有的动作游戏
def spider_actionGame(soup):
    """Collect every game name on *soup* into the global ``all_game``
    list and return that list.

    Game names are the link texts inside <ul class="list affix cf">.
    Scraping by class is deliberate: selecting by id ('classic') pulls
    in extra, noisier content.
    """
    game_lists = soup.find_all('ul', {'class': 'list affix cf'})
    all_game.extend(
        link.text
        for game_list in game_lists
        for link in game_list.find_all('a')
    )
    return all_game
#爬取动作游戏下面的对应超链接，目的是为了爬取其介绍，但是有的游戏页面可能没有游戏介绍，直接设置为空即可
def spider_action_game_url(soup):
    """Collect the detail-page href of every game on *soup* into the
    global ``game_url`` list and append one introduction text per game
    to the global ``game_intro`` list.

    BUG FIX: only the hrefs appended by THIS call are fetched. The
    original iterated the entire accumulated ``game_url`` list on every
    call, so when the main loop invoked it once per page it re-downloaded
    and duplicated the introductions of all earlier pages.

    An href that is already absolute (starts with 'http') points off-site,
    so it gets the placeholder text; pages answering 404 do too.
    """
    # Remember where this page's hrefs begin in the shared list.
    start = len(game_url)
    for a_tag in soup.find('ul', {'class': 'list affix cf'}).find_all('a'):
        game_url.append(a_tag.get('href'))
    print(game_url)

    base_url = 'http://www.4399.com'
    for href in game_url[start:]:
        if href.startswith('http'):
            # External link — no intro page to scrape on this site.
            game_intro.append('本页没有该游戏的内容介绍')
        else:
            # timeout keeps one dead server from hanging the whole crawl
            response = requests.get(base_url + href, timeout=10)
            if response.status_code != 404:
                detail_soup = spider_detail_info(base_url + href)
                spider_introduce(detail_soup, game_intro)
            else:
                game_intro.append('本页没有该游戏的内容介绍')
    print(game_intro)
#爬取所有的当前游戏类型下面的网址(主要是页面的地址)
def spider_url(soup):
    """Append the href of every pagination link on *soup* to the global
    ``page_url`` list.

    The pager lives in the second <div class="pag">; its first and last
    <a> tags are navigation controls, so only the links between them
    are kept.
    """
    pager = soup.find_all('div', {'class': 'pag'})[1]
    links = pager.find_all(name='a')
    # Drop the leading and trailing control links via slicing
    # (the original used del soup[0] plus soup.pop()).
    for anchor in links[1:-1]:
        page_url.append(anchor['href'])

if __name__ == '__main__':
    # Mapping: category name -> list of every game name in that category.
    action_game = {}
    # All game names harvested across every page of the category.
    all_game = []
    # Pagination hrefs for the pages after the first one.
    page_url = []
    # Detail-page hrefs of every game seen so far.
    game_url = []
    # One introduction text per entry in game_url.
    game_intro = []

    # Category listing to crawl — change the suffix to crawl a different
    # category ('/flash_fl/2_1.htm' is the action-game listing).
    # BUG FIX: the original bound this to the name ``str``, shadowing the
    # builtin str() for the rest of the script.
    page_suffix = '/flash_fl/2_1.htm'
    url = 'http://www.4399.com' + page_suffix

    # Request the first page of the category.
    soup = web_content(url)
    # Category label (e.g. the action-games section name).
    gameType = spider_action(soup)
    # Game names on page 1.
    spider_actionGame(soup)
    # Detail links + introductions for page 1.
    spider_action_game_url(soup)

    # Hrefs of the remaining pages of this category.
    spider_url(soup)
    # Crawl every following page.
    for suffix in page_url:
        url = 'http://www.4399.com' + suffix
        print(url)
        content = web_content(url)
        spider_actionGame(content)
        # BUG FIX: the original passed the stale first-page ``soup`` here,
        # so every iteration re-scraped page 1's links instead of the
        # page just downloaded.
        spider_action_game_url(content)

    action_game[gameType] = all_game

    # Sanity check: one introduction should exist per crawled game.
    print(len(game_intro))
    print(action_game)
