# -*- coding:utf-8 -*-
import requests
from lxml import etree
import sys
import io
import time

#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') # change stdout's default encoding to UTF-8 (enable on consoles that cannot print Chinese)
def lxml_find(html):
    """Parse *html* with lxml and print the Baidu focus-list headline titles.

    Args:
        html: HTML document text, e.g. ``response.text``.
    """
    start = time.time()                # timing, to compare parser speeds
    selector = etree.HTML(html)        # build an lxml element tree from the markup
    # Bug fix: the original XPath ended in ``a.text``, which selects elements
    # literally *named* "a.text" (dots are legal in XML names), so the result
    # was always an empty list.  Select the <a> elements themselves and read
    # their .text attribute in the loop below.
    titles = selector.xpath('//*[@class="baijia-focus-list"]/ul/li/a')
    for each in titles:
        print(each.text)
    end = time.time()
    print('lxml耗时', end - start)     # elapsed time (label kept verbatim)

# Page that normally requires a logged-in session
url = 'https://news.baidu.com/'

# Cookie string copied from the browser's dev tools after logging in
cookie_str = r'JSESSIONID=xxxxxxxxxxxxxxxxxxxxxx; iPlanetDirectoryPro=xxxxxxxxxxxxxxxxxx'

# Turn the cookie string into a dict for requests.
# Bug fix: the browser separates pairs with "; ", so each fragment must be
# stripped before splitting — otherwise every key after the first keeps a
# leading space (' iPlanetDirectoryPro') and the server never matches it.
cookies = {}
for line in cookie_str.split(';'):
    key, value = line.strip().split('=', 1)
    cookies[key] = value

# Request headers: present ourselves as a desktop Chrome browser
headers = {'User-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}

# Send the GET request with the headers and cookies.  A timeout keeps the
# script from hanging forever if the server never responds.
resp = requests.get(url, headers=headers, cookies=cookies, timeout=10)
resp.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
lxml_find(resp.text)
