import urllib.request
import requests
from bs4 import BeautifulSoup
import json
# Fetch a web page and return it parsed as a BeautifulSoup tree.
def web_content(url):
    """Request *url* with a browser User-Agent and return a parsed soup.

    Args:
        url: Absolute URL of the page to fetch.

    Returns:
        BeautifulSoup tree of the page, parsed with the lxml parser.

    Raises:
        urllib.error.URLError: on network failure.
        UnicodeDecodeError: if the page body is not valid UTF-8.
    """
    req = urllib.request.Request(url)
    # Send a desktop-browser User-Agent so the site does not reject the bot.
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36')
    # BUG FIX: close the HTTP response via a context manager — the original
    # leaked the response object returned by urlopen().
    # NOTE(review): the original comment warned about gbk encoding being a
    # pitfall, yet the code decodes UTF-8 — confirm the site's real charset.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('utf-8')
    return BeautifulSoup(html, features='lxml')
# Scrape every recommended game (name + hyperlink) from one listing page.
def spider_all_action(soup):
    """Collect the games recommended on one listing page.

    Appends one single-entry dict ``{game_name: href}`` per game to the
    module-level ``all_game`` list (defined in the ``__main__`` block) and
    returns that list, so repeated calls accumulate results across pages.

    Args:
        soup: BeautifulSoup tree of a 7k7k listing page.

    Returns:
        The module-level ``all_game`` list with this page's games appended.
    """
    # Step 1: narrow to the <div> whose class attribute is exactly ['wrap']
    # (the lambda keeps the original exact-match semantics; class_='wrap'
    # would also match multi-class tags).
    region = soup.find(lambda tag: tag.name == 'div' and tag.get('class') == ['wrap'])
    # Step 2: the recommended games live inside <ul class="game_list">.
    game_list = region.find('ul', {'class': 'game_list'}) if region is not None else None
    if game_list is None:
        # Layout changed or empty page — the original crashed here with
        # AttributeError; skip gracefully instead.
        return all_game
    for game in game_list.find_all('a'):
        # Each anchor carries the detail-page link; the <p> holds the name.
        href = game.get('href')
        name_tag = game.find('p')
        if name_tag is None:
            continue  # anchor without a <p> caption is not a game entry
        all_game.append({name_tag.text: href})
    return all_game
# Scrape per-game details: description, size, release date.
def spider_info(info):
    """Fetch each game's detail page and extract its metadata.

    Args:
        info: Iterable of single-entry dicts ``{game_name: detail_url}``
              as produced by ``spider_all_action``.

    Returns:
        dict mapping game name -> dict of detail fields (keys parsed from
        the info paragraph, e.g. size/date, plus ``介绍`` for the
        description); missing sections are recorded as ``"无"``.
    """
    results = {}
    for entry in info:
        for name, url in entry.items():
            soup = web_content(url)
            details = {}
            # <p class="game_info_f1"> holds the size / release-date spans.
            info_par = soup.find('p', {'class': 'game_info_f1'})
            if info_par is not None:
                # The first span is a heading, not a key:value pair — skip it
                # (slice instead of the original mutating `del info[0]`,
                # which also shadowed the `info` parameter).
                for span in info_par.find_all('span')[1:]:
                    # Each span reads like "大小：12MB" (full-width colon).
                    key_val = span.text.split("：", 1)
                    if len(key_val) == 2:  # guard: malformed span w/o colon
                        details[key_val[0]] = key_val[1]
                # <div class="game_desc"> holds the description text.
                desc = soup.find('div', {'class': 'game_desc'})
                if desc is not None:
                    details["介绍"] = desc.find(text=True).strip()
                else:
                    # BUG FIX: the original never stored the game at all
                    # when the description block was missing, silently
                    # dropping its size/date too.
                    details["介绍"] = "无"
            else:
                details["大小"] = "无"
                details["日期"] = "无"
                details["介绍"] = "无"
            results[name] = details
    return results
if __name__ == '__main__':
    # Module-level accumulator shared with spider_all_action(), which
    # appends every scraped game here across all pages.
    all_game = []
    info = None
    # Crawl the 50 listing pages of recommended puzzle games; each call
    # returns the (growing) accumulated list.
    for page_no in range(1, 51):
        page_url = f'http://www.7k7k.com/yizhi/index_{page_no}.htm'
        info = spider_all_action(web_content(page_url))
    # url = 'http://www.7k7k.com/flash/197891.htm'
    d = spider_info(info)
    # Optionally persist the results:
    # with open('yizhi_intro.txt', 'w', encoding='utf-8') as f:
    #     f.write(json.dumps(d,ensure_ascii=False))
    print(d)