# coding:utf-8
import requests
from lxml import html  # we use lxml here, i.e. the XPath approach to parsing


# Douban login: the simplest technique is reusing a logged-in browser cookie.
# This approach works for roughly 80% of login-gated sites.
cookie = {}

raw_cookies = 'gr_user_id=6b9674c7-9056-4619-8d12-86b23d13c3c0; bid=KWLB_nfSvRg; douban-fav-remind=1; ' \
              'douban-profile-remind=1; ll="118172"; dbcl2="52778075:yY0ujQ8RIhc"; _ga=GA1.2.1235692474.1525693668; ' \
              'ct=y; _vwo_uuid_v2=D065313689CBFFE9F4B043459954D0DBF|fcb1e80171b0bd482f4668736fb45b47; ck=hPPs; ' \
              'ap_v=0,6.0; push_noty_num=0; push_doumail_num=0; __utmt=1; __utma=30149280.1235692474.1525693668.1537721322.1537762453.318; _' \
              '_utmb=30149280.2.10.1537762453; __utmc=30149280; __utmz=30149280.1537702505.316.12.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; ' \
              '_utmv=30149280.5277' # paste your own cookie string here (captured with a packet-sniffing tool)

# Parse the raw "Cookie:" header into key/value pairs and load them into the
# dict. maxsplit=1 keeps values that themselves contain '=' intact, and
# strip() removes the space left after each ';' separator.
for pair in raw_cookies.split(';'):
    key, value = pair.split('=', 1)
    cookie[key.strip()] = value.strip()


# The key step: request the profile page with the session cookies attached.
# A timeout stops the script from hanging forever on a dead connection.
page = requests.get(
    "https://www.douban.com/people/sweetheartx/",
    cookies=cookie,
    timeout=10,
)
# Fail fast on a non-2xx response (e.g. the cookie was rejected) instead of
# silently running XPath over an error page.
page.raise_for_status()


# Parse the fetched HTML into a tree we can query with XPath.
tree = html.fromstring(page.text)

# XPath query: pull out the text of the profile's intro <span>.
intro_raw = tree.xpath('//span[@id="intro_display"]/text()')

print(intro_raw)
