import requests


# Cookie-based login (paste a logged-in browser's Cookie header)
# url = "https://www.zhihu.com/hot"
# header = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
#     "Cookie":'''_zap=1ea2b3db-8be5-4dba-a409-331e2fdfb8ac; d_c0="ANCemGqzKhGPTl0yCQBx3kcy6G1qxbi-_vk=|1587703381"; _ga=GA1.2.1519934413.1587703380; z_c0="2|1:0|10:1587703488|4:z_c0|92:Mi4xbHVCdUJnQUFBQUFBMEo2WWFyTXFFU1lBQUFCZ0FsVk53THlQWHdCLUVDVXN4QzdnNWl0UFdoUjBvWnlzc1h6R3FR|77e0d4c7514c5c365567f127de0668f770b6015487b34f988c8b90b461ce8e32"; q_c1=39345a5ca1d246949ded58fbedb084ca|1591670779000|1588176902000; _xsrf=6CHMrsXlM5TQjLNvmrZftctiGx5x3a8k; tshl=; tst=h; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1592812658,1592835200,1592923643,1593227092; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1593227092; _gid=GA1.2.2117050839.1593227093; _gat_gtag_UA_149949619_1=1; SESSIONID=iuB3jLwGraqYEbzvMEFEEHWBBL3UPLz7o6bCcAm4ayD; JOID=VVASA0n9Oc8BQVt6cfpzlSXzXg1gm0OKMC4YAjGKV7FfBjwPCKqGPFhLW3J07qmFblK-jw5Bhs9p0Q5l51XbhT8=; osd=VFoVAUz8M8gDRFpwdvh2lC_0XAhhkUSINS8SBTOPVrtYBDkOAq2EOVlBXHBx76OCbFe_hQlDg85j1gxg5l_chzo=; KLBRSID=ed2ad9934af8a1f80db52dcb08d13344|1593227141|1593227091'''
# }
#
# response = requests.get(url, headers=header)
# print(response.cookies.get_dict())
# print(response.text)


# Session object: log in once, then reuse the session's cookies for later requests
# header = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
# }
# login_url = "https://www.yaozh.com/login"
# login_form_data = {
#     "username": "lucky_yu",
#     "pwd": "199213xiao..",
#     "formhash": "4E3A4A3F97",
#     "backurl": "https%3A%2F%2Fwww.yaozh.com%2F"
# }
# session = requests.Session()
# login_response = session.post(login_url, headers=header, data=login_form_data)
# print(login_response.text)
#
# member_url = "https://www.yaozh.com/member/"
# member_response = session.get(member_url,headers=header)
# print(member_response.text)



# xpath
from lxml import etree
# url = "https://www.baidu.com/"
# XPath practice: fetch a Baidu search-results page for the keyword "中国".
# Fix: the original hard-coded the encoded query into the URL
# (wd=%E4%B8%AD%E5%9B%BD) AND passed a second dict {'kw': '中国'};
# requests appends `params` to any query already in the URL, so the
# request went out as "?wd=中国&kw=中国" — and Baidu's search endpoint
# only reads `wd`, making 'kw' dead weight. Build the query once instead.
url = "https://www.baidu.com/s"
params = {'wd': '中国'}  # 'wd' is Baidu's search-keyword parameter

# XPath targeting the hot-topic list on the results page (consumed by the
# commented-out lxml parsing examples further down in this file).
'''//table[@class="c-table opr-toplist1-table"]/tbody[1]/tr/td/span/a[@class='opr-toplist1-cut']'''

# A browser-like User-Agent is required or Baidu serves a stripped page.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
}
response = requests.get(url, headers=header, params=params)
response.encoding = 'utf-8'  # be explicit before .text; avoids mojibake if the charset header is missing
print(response.text)

# html = etree.HTML(response.text)  # 将字符串解析为html文档
#
# result = etree.tostring(html).decode('utf-8')  # 按字符串序列化HTML
# print(result)

# reade = html.xpath('''//table[@class="c-table opr-toplist1-table"]/tbody[1]/tr/td/span/a[@class='opr-toplist1-cut']/text()''')
# print(reade)





# Reading from a local file.
# Malformed HTML can fail to parse; build an explicit HTMLParser to handle it leniently.
# parser = etree.HTMLParser(encoding="utf-8")
#
# read_html = etree.parse(r'02.html',parser=parser)
# result_read = etree.tostring(read_html).decode('utf-8')
# print(result_read)


# 基本操作
# html = read_html.xpath('//div')  # 获取所有div
# for i in html:
#     print(etree.tostring(i).decode('utf-8'))


# html = read_html.xpath('//div/@class')  # div下所有class属性 列表
# print(html)


# html = read_html.xpath('//head/link[@href="https://www.baidu.com/img/baidu.svg"]')
# print([etree.tostring(i) for i in html])


# html = read_html.xpath('//div//span')  # div下所有span标签
# print(html)

# html = read_html.xpath('//li/a//@class')
# print(html)


# html = read_html.xpath('//li[last()]/a/@href')  # last()最后一个
# print(html)

# html = read_html.xpath('//li[last()-1]/a')  # 倒数第二个last()-1 获取内容
# print(html[0].text)   # 获取内容

# html = read_html.xpath('//li[last()-1]/a/text()')  #
# print(html)   # 获取内容 ['fourth item']