# from bs4 import BeautifulSoup
# from lxml import html
# import xml
# import requests
#
# url = "https://movie.douban.com/chart"
# f = requests.get(url)  # GET the page to obtain its HTML content
# soup = BeautifulSoup(f.content, "lxml")  # parse the page with the lxml parser; f.text also seems to return the HTML
# # print(f.content.decode())  # try printing the page content to check the fetch succeeded
# # content = soup.find_all('div', class_="p12")  # fetch nodes; "class" clashes with the keyword, hence class_
#
# for k in soup.find_all('div', class_='pl2'):  # find each div tag whose class is pl2
#     a = k.find_all('span')  # find span tags under each matching div; each contains four spans
#     print(a[0].string)


##############################################################
import urllib.request
from lxml import etree

url = 'https://movie.douban.com/top250'


def ask(url):
    """Fetch *url* over HTTP and return the response body decoded as UTF-8.

    A desktop-browser User-Agent header is sent because douban.com rejects
    requests carrying the default urllib User-Agent.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
    req = urllib.request.Request(url, headers=head)
    # Use a context manager so the connection is closed even if read/decode
    # raises (the original left the response object open).
    with urllib.request.urlopen(req) as response:
        html = response.read().decode('utf-8')
    return html

# Fetch the Top 250 page and parse the HTML into an element tree.
html_content = ask(url)
content = etree.HTML(html_content)

# Collect every movie title (<span class="title">). Alternate titles come as
# "\xa0/\xa0Other Title", so strip the non-breaking spaces from each entry.
# NOTE: the original wrote back via ret[ret.index(x)] = ..., which targets the
# FIRST equal element — with duplicate titles the wrong slot is overwritten and
# later duplicates are never cleaned — and it mutated the list while iterating
# it. Rebuilding the list with a comprehension avoids both problems.
ret = [x.replace("\xa0", '') for x in content.xpath('//span[@class="title"]/text()')]
# print(ret)

# Debug aid: serialize the parsed tree back to text to inspect what was fetched.
str_content = etree.tostring(content)
print(str_content.decode('utf-8'))
