import requests
from bs4 import BeautifulSoup


# Example: sending custom request headers / form data.
# headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit'}
# # A GET request carries no body — form data like this belongs to POST requests.
# data = {'user': 'abc', 'password': "123"}
# r = requests.get('https://www.weibo.com/', headers=headers, data=data)

# Fetch the Baidu homepage.
# timeout= keeps the script from hanging forever on a dead connection.
r = requests.get('https://www.baidu.com', timeout=10)
# Decode using the encoding sniffed from the body itself: Baidu's response
# headers mislabel the charset, which would garble the Chinese text in r.text.
r.encoding = r.apparent_encoding
# HTTP status code (200 on success)
print(r.status_code)
# Raw HTML source of the page
print(r.text)
# For JSON (ajax) endpoints, use r.json() instead:
# print(r.json())

# Keeping a session (cookies persist across requests):
# sess = requests.session()
# # Log in first ...
# sess.post('maybe a login url', data=data, headers=headers)
# # ... then visit other pages within the same authenticated session.
# sess.get('other urls')

# Sample HTML fragment used to demonstrate BeautifulSoup queries.
html = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Parse with the built-in html.parser backend.
soup = BeautifulSoup(html, "html.parser")
# The <title> tag itself
print(soup.title)
# Only the title's text content
print(soup.title.get_text())
# Locate elements by tag name
print(soup.find_all('a'))
# Locate elements by attribute value
print(soup.find_all(attrs={'id': 'link1'}))
# Locate by tag name and attribute together
print(soup.find_all('a', id='link1'))



print("百度的代码")

# Build a soup from the Baidu page fetched above (r).
baidu = BeautifulSoup(r.text, "html.parser")
# Page title text
print(f"百度标题{baidu.title.text}")
# Every <div> element on the page
print(baidu.find_all('div'))



# Scrape new-game names from the 17173 "new games" channel.
# Listing-page URL
url = 'http://newgame.17173.com/game-list-0-0-0-0-0-0-0-0-0-0-1-2.html'

# Fetch the listing page; timeout= keeps the script from hanging on a dead host.
game = requests.get(url, timeout=10)
# Sniff the real encoding from the body — the page is Chinese and the
# response headers may mislabel the charset.
game.encoding = game.apparent_encoding

# Parse the page HTML.
gameRequest = BeautifulSoup(game.text, "html.parser")

# Locate the container that holds the game list (class-based lookup).
insert = gameRequest.find_all(attrs={"class": "ptlist ptlist-pc"})
if not insert:
    # Request failed or the page layout changed — nothing to extract
    # (the original would have crashed with IndexError on insert[0]).
    print("game list container not found")
else:
    tit = insert[0].find_all(attrs={"class": "tit"})
    for title in tit:
        # BUG FIX: the original compared the Tag object itself to the string
        # "视频", which is never equal, so the filter printed every entry.
        # Compare the tag's text (newlines stripped) to skip "视频" (video) rows.
        name = title.text.replace("\n", "")
        if name != "视频":
            print(name)


