import requests
from bs4 import BeautifulSoup
from lxml import etree
import re


def get_douban_html():
    """Fetch the Douban new-movie chart page and save it locally.

    Downloads https://movie.douban.com/chart and, when the response is
    HTTP 200, writes the parsed HTML to ``001.html`` (UTF-8); otherwise
    prints the error status code. Returns None.
    """
    # Douban new-movie chart
    url = "https://movie.douban.com/chart"
    # Browser-like request headers so the site serves the normal page
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "movie.douban.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    # timeout= prevents the request from hanging forever on a stalled
    # connection (requests has no default timeout)
    data = requests.get(url=url, headers=headers, timeout=10)
    print('获取豆瓣榜单网页')
    print('状态码为', data.status_code)
    if data.status_code == 200:
        soup = BeautifulSoup(data.content, 'lxml')
        # "with" guarantees the file is closed even if the write fails
        with open('001.html', 'w', encoding='UTF-8') as file:
            file.write(str(soup))
        print('下载完成')
    else:
        print("请求出错: {}".format(data.status_code))


def read_douban_html():
    """Parse the locally saved chart page (001.html) with BeautifulSoup.

    Work in progress: iterates over every movie row but does not yet
    produce any output. Returns None.
    """
    print('使用 html 解析网页')
    # "with" closes the source file after parsing (the original passed an
    # open handle to BeautifulSoup and leaked it)
    with open('001.html', encoding='UTF-8') as fp:
        soup = BeautifulSoup(fp, 'lxml')
    # each chart entry lives in a <tr class="item"> row
    movies_list = soup("tr", class_='item')
    for movie in movies_list:
        # title/alias fragments of the entry; extracted but not yet used (WIP)
        contents = movie.find('div', class_="pl2").find('a').contents


# read_douban_html()


def read_douban_lxml():
    """Parse 001.html with lxml/XPath and write a movie summary to 001.txt.

    For every <tr class="item"> row, extracts the title, alias list, info
    line and rating, then writes the formatted text to ``001.txt`` (UTF-8).
    Raises IndexError if an expected field is missing from a row.
    """
    print('使用 lxml 解析网页')
    # "with" closes the input file (the original leaked the handle);
    # HTMLParser repairs missing markup such as declarations
    with open('001.html', encoding='UTF-8') as fp:
        html = etree.HTML(fp.read(), etree.HTMLParser())
    # collect fragments and join once at the end instead of quadratic `+=`
    parts = []
    for item in html.xpath('//tr[@class="item"]'):
        # movie title from the poster link's title attribute
        parts.append("----------\n[电影]: {}\n".format(item.xpath('.//a[@class="nbg"]/@title')[0]))
        # alias line; strip surrounding whitespace and the trailing slash
        parts.append("[别名]: {} / ".format(item.xpath('.//div[@class="pl2"]/a/text()')[0].strip().strip('/').strip()))
        # extra alias text held in <span> children of the title link
        parts.append("{}\n".format("".join(item.xpath('.//div[@class="pl2"]/a/span/text()'))))
        # release/cast info line
        parts.append("[信息]: {}\n".format(item.xpath('.//div[@class="pl2"]/p[@class="pl"]/text()')[0]))
        # numeric rating
        parts.append("[评分]: {}\n".format(item.xpath('.//div[@class="pl2"]//span[@class="rating_nums"]/text()')[0]))
    # write the report; "with" ensures the file is closed on error too
    with open('001.txt', 'w', encoding='utf-8') as file:
        file.write(''.join(parts))


# Run the lxml-based parser only when executed as a script, so importing
# this module does not trigger file I/O as a side effect.
if __name__ == "__main__":
    # get_douban_html()
    read_douban_lxml()


# '//a[@href="link2.html"]/parent::*/@class'

"""
    # 指定解析器HTMLParser会根据文件修复HTML文件中缺失的如声明信息
    html2 = etree.parse('001.html', etree.HTMLParser())
    # 解析成字节
    result2 = etree.tostring(html2)
    # 解析成列表
    # result2 = etree.tostringlist(html2)
    print(type(result2))
    print(type(html2))
    print(result2.decode('utf-8'))
"""


