import urllib.request
from bs4 import BeautifulSoup
import json

# Fetch a web page and return it parsed as a BeautifulSoup tree.
def web_content(url):
    """Request `url` with a browser User-Agent and return the parsed soup.

    The 4399 site serves its pages encoded as GBK, so the response bytes
    are decoded with 'gbk' (decoding as UTF-8 would garble the text —
    this is the "pitfall" the original author noted).
    """
    req = urllib.request.Request(url)
    # Spoof a desktop Chrome User-Agent so the site does not reject the bot.
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36')
    # Use a context manager so the HTTP response is always closed, even if
    # read()/decode() raises — the original never closed the connection.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('gbk')
    return BeautifulSoup(html, features='lxml')
# Scrape the game-category label from the page.
def spider_maoxian(soup):
    """Return the text of the second <span class="tit"> element on the page."""
    tit_spans = soup.find_all('span', {'class': 'tit'})
    # The category name is held by the second matching span.
    return tit_spans[1].text
# Scrape every game name listed on the current page.
def spider_qipaiGame(soup):
    """Append the text of each game link on the page to the module-level
    `all_game` list and return that list.

    Game names live in <a> tags inside <ul class="list affix cf"> blocks.
    """
    for game_list in soup.find_all('ul', {'class': 'list affix cf'}):
        # Every <a> inside the list is one game entry.
        all_game.extend(link.text for link in game_list.find_all('a'))
    return all_game
# Scrape the pagination links for the current game category (page addresses).
def spider_url(soup):
    """Collect page numbers and page URLs from the pagination bar.

    Fills three module-level globals:
      * page_num           -- visible page numbers, as strings
      * page_url           -- href of each visible numbered page link
      * page_num_unexisted -- page numbers hidden behind the "..." gap

    Fix: the original computed an unused local `cha` and walked the same
    link list twice; both removed here.
    """
    pag_divs = soup.find_all('div', {'class': 'pag'})
    # The page carries two pagination bars; the second one is the one we need.
    links = pag_divs.pop(1).find_all(name='a')
    # Drop the first and last anchors (prev/next style controls), keeping
    # only the numbered page links.
    del links[0]
    links.pop()
    for link in links:
        page_num.append(link.text)
        page_url.append(link['href'])
    # The bar elides pages between the second-to-last and last numbers;
    # record the hidden page numbers so their URLs can be synthesized later.
    if int(page_num[-2]) + 1 != int(page_num[-1]):
        for hidden in range(int(page_num[-2]) + 1, int(page_num[-1])):
            page_num_unexisted.append(hidden)
    print(page_num_unexisted)
if __name__ == '__main__':
    # All games keyed by category: key = category name, value = game names.
    qipai_game = {}
    all_game = []
    # hrefs of every page in the category.
    page_url = []
    # Page numbers visible in the pagination bar.
    page_num = []
    # Page numbers hidden behind the "..." gap on the first page.
    page_num_unexisted = []
    str_suffix = '/flash_fl/7_1.htm'
    url = 'http://www.4399.com' + str_suffix

    # Request the first page.
    soup = web_content(url)
    # Scrape the category label.
    gameType = spider_maoxian(soup)
    # Scrape the games listed on the first page.
    spider_qipaiGame(soup)
    # Scrape the pagination page addresses.
    spider_url(soup)

    # Synthesize URLs for the pages the pagination bar did not show, by
    # substituting each hidden page number into a known page URL template.
    template = page_url[-2]
    for missing in page_num_unexisted:
        page_url.append(template.replace(page_num[-2], str(missing)))
    print(page_url)

    # Visit every collected page address and scrape its games.
    for suffix in page_url:
        content = web_content('http://www.4399.com' + suffix)
        spider_qipaiGame(content)

    qipai_game[gameType] = all_game
    print(len(all_game))
    # Fix: the original called f.close() after the `with` block, which is
    # redundant — the context manager already closes the file. json.dump
    # writes straight to the file object instead of dumps() + write().
    with open('maoxian_game.txt', 'w', encoding='utf-8') as f:
        json.dump(qipai_game, f, ensure_ascii=False)
    print(qipai_game)