"""Scrape movie titles from a Douban list page and print them.

Fetches the list page over HTTP, then extracts the text of every
``<div class="title">`` element.
"""
import requests as rq
from bs4 import BeautifulSoup

URL = 'https://www.douban.com/doulist/2772079/'
# Minimal browser-like User-Agent so the site does not reject the request as a bot.
HEADERS = {'User-Agent': 'Chrome/55.0.2883.75'}


def fetch_page(url):
    """Return the HTML text of *url*.

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the server does not answer within 10 seconds.
    """
    response = rq.get(url, headers=HEADERS, timeout=10)
    response.raise_for_status()  # fail loudly instead of parsing an error page
    return response.text


def extract_titles(doc):
    """Return the stripped text of every <div class="title"> in *doc* (HTML string)."""
    soup = BeautifulSoup(doc, 'html.parser')
    # get_text(strip=True) replaces the original lxml/XPath round-trip:
    # re-parsing each tag with a second parser was redundant, and indexing
    # the first text node ([0]) could raise IndexError on an empty div.
    return [tag.get_text(strip=True) for tag in soup.find_all('div', class_='title')]


if __name__ == '__main__':
    for title in extract_titles(fetch_page(URL)):
        print(title)