# import requests
# from lxml import etree
# import csv
# def get_html(url,time=30):
#     head = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"}
#     try:
#         r=requests.get(url=url,params=date,headers=head)
#         r.encoding=r.apparent_encoding
#         r.raise_for_status()
#         return r.text
#     except Exception as e:
#         print(e)
# def parser(html):
#     doc = etree.HTML(html)
#     out_list=[]
#     for row in doc.xpath('//*[@id="book-img-text"]/ul/li/div[2]'):
#         row_data=[
#             row.xpath("h4/a/text()")[0],
#             row.xpath('p[1]/a[1]/text()')[0],
#             row.xpath("p[2]/text()")[0].strip(),
#             row.xpath("p[3]/span/text()")[0]
#         ]
#         out_list.append(row_data)
#     return out_list
# def save_csv(path,content):
#     with open(path,"a+",newline='',encoding="utf-8")as l:
#         csv_write=csv.writer(l)
#         csv_write.writerows(content)
# if __name__ == '__main__':
#     for t in range(1,6):
#         url = 'http://www.bspider.top/qidian/'
#         date={'page':t}
#         html=get_html(url)
#         out_list=parser(html)
#         path='d:\\赵华超.csv'
#         save_csv(path,out_list)
#
# How to import (the duplicate import that was here has been removed).
from bs4 import BeautifulSoup

# Comparison with lxml/XPath: (1) etree.HTML(file)  (2) locate/extract via xpath.
# BeautifulSoup workflow:
# (1) Instantiate a BeautifulSoup object, loading the content we want to parse
#     (the page source) into it, using the 'lxml' parser.
# (2) Extract the desired content from tags and attributes via the object's methods.

# Instantiating the object:
# (1) local file:          soup = BeautifulSoup(open(filename, encoding='utf-8'), 'lxml')
# (2) server response:     soup = BeautifulSoup(r.text, 'lxml')

# Parse the local HTML file. Use a context manager so the file handle is
# closed deterministically (the original passed an open() result straight to
# BeautifulSoup and never closed it — a resource leak).
with open('1.html', encoding='utf-8') as html_file:
    soup = BeautifulSoup(html_file, 'lxml')

# --- The main event: locating and extracting ---

# soup.<tag>        : returns the FIRST matching tag element
# print(soup.li)

# soup.<tag>.attrs  : dict of all attributes/values of the first matching tag
# print(soup.li.attrs)


# bs4 also provides a few functions:
# (1) find(): returns a single element object
# 1. soup.find('tag') == soup.tag : first matching tag
# print(soup.find('li'))
# print(soup.find('a'))

# 2. Precise search: soup.find('tag', attribute=value)
#    (note: class is a Python keyword, so bs4 uses class_)
# print(soup.find('a', title='百度'))
# print(soup.find('a', class_='a2'))

# (2) find_all(): returns a LIST of matches
# 1. soup.find_all('tag')
# print(soup.find_all('li'))
# print(soup.find_all('a'))

# 2. Several tag names at once (pass a list): soup.find_all([tag1, tag2, ...])
# print(soup.find_all(['li', 'a']))

# 3. Only the first N matches: soup.find_all('tag', limit=N)
# print(soup.find_all('li', limit=3))

# (3) select(): CSS selectors (recommended)
# 1. soup.select('tag') behaves like find_all('tag'): returns a list
# print(soup.select('li'))

# Class selector: '.' prefix matches by class attribute
# print(soup.select('.a1'))
# print(soup.select('.a2'))
# print(soup.select('.a3'))

# id is unique: '#' prefix matches by id attribute
# print(soup.select('#冀A'))
# print(soup.select('#冀J'))
# print(soup.select('#jd'))
# print(soup.select('#c3'))

# Attribute selector: soup.select('tag[attr]')
# print(soup.select('li[id]'))
# More precise: soup.select('tag[attr=value]')
# print(soup.select('li[id="冀A"]'))
# print(soup.select('li[id="冀J"]'))

# Hierarchy selectors (children vs. descendants)
# (1) direct children: child combinator '>'
# print(soup.select('div>ul>li'))
# (2) any descendants: descendant combinator ' ' (space)
# print(soup.select('div li'))

# Tag text: <tag>text</tag> -> .text
# print(soup.select('span')[1].text)            # list indexing into the matches
# print(soup.select('span')[0].text)
# print(soup.select('li')[3].text)


# Attribute values: element.attrs['attribute-name']
# Print the href of the first <a> and the title of the second <a> in 1.html.
print(soup.select('a')[0].attrs['href'])
print(soup.select('a')[1].attrs['title'])

