import re
import os
import sys
import inspect
from bs4 import BeautifulSoup

# If a "'gbk' codec can't decode byte" error occurs, open the file in 'rb' mode
# and let BeautifulSoup sniff the encoding itself.
# The with-block also closes the source handle (the original leaked it).
with open('D:\\BaiduNetdiskDownload\\爬虫从入门到应用\\2022_06_08直播数据解析_正则\\素材\\豆瓣.html', 'rb') as douban_src:
    soup_douban = BeautifulSoup(douban_src, 'lxml')

# Every book title + intro lives in <div class="detail-frame">; collect them all.
all_bookInfo = soup_douban.find_all('div', class_='detail-frame')
# utf-8 avoids UnicodeEncodeError on platforms whose default codec (e.g. gbk)
# cannot represent every character in the titles — same fix the script already
# applies when writing the intros below.
with open("getBookName_byBS4.txt", 'w', encoding='utf-8') as f:
    for item in all_bookInfo:
        # First <a> inside each div holds the title; .string (or .text) extracts it.
        f.write(item.a.string + '\n')
# no f.close() needed: the with-block already closed the file

# Dump the repr of every <img> tag for inspection. utf-8 keeps the write safe
# when alt text or URLs contain non-ASCII characters; write() (not writelines())
# is the right call for a single string. No explicit close — with handles it.
all_img = soup_douban.find_all('img')
with open("img.html", 'w', encoding='utf-8') as ff:
    ff.write(str(all_img))

all_bookIntro = soup_douban.find_all('p', class_='detail')
# encoding='utf-8' prevents: UnicodeEncodeError: 'gbk' codec can't encode
# character '\u2022' ... when the platform's default codec is gbk.
with open("getBookIntro_byBS4.txt", 'w', encoding='utf-8') as fff:
    for intros in all_bookIntro:
        # .string yields a NavigableString; cast to str before writing.
        fff.write(str(intros.string))
# no fff.close() needed: the with-block already closed the file

# find_all matches attribute VALUES with re.match semantics (anchored at the
# start), so a pattern describing the full <a ...> markup can never match a bare
# href value — this prints an empty list. Raw string avoids the invalid-escape
# DeprecationWarning for '\d'.
print(soup_douban.find_all(href=re.compile(r'<a href="https://book.douban.com/subject/\d+/">(.*?)</a>')))

# with-block closes the source handle (the original leaked it into BeautifulSoup).
with open('D:\\BaiduNetdiskDownload\\爬虫从入门到应用\\2022_06_08直播数据解析_正则\\素材\\股票.html', 'rb') as gupiao_src:
    soup_gupiao = BeautifulSoup(gupiao_src, 'lxml')
# The <thead> carries only a class attribute, so class_ suffices;
# attrs={'class': 'tbody_right'} is the form to use when a tag has several attributes.
gupiao_title = soup_gupiao.find('thead', class_='tbody_right')
# .text (like .get_text()) returns all text content; split it on any whitespace
# character (raw r'\s' avoids the invalid-escape warning).
title_lst = re.split(r'\s', gupiao_title.text)
title_lst = [cell for cell in title_lst if cell]    # drop empty strings the split produces

# 'w' mode already truncates, so the old truncate(0) was redundant; the
# with-block guarantees the header row is flushed and CLOSED before the detail
# rows are appended to the same file below (the original never closed it).
with open('gupiaoInfo_byBS4.txt', 'w', encoding='utf-8') as header_file:
    for caption in title_lst:
        header_file.write(caption + '\t\t\t\t')
    header_file.write('\n')

# The stock rows live in <tbody class="tbody_right" id="datalist">; each data
# row contributes six <td> cells.
detail_info = soup_gupiao.find_all('tbody', attrs={'class': 'tbody_right', 'id': 'datalist'})
# Cell counter deliberately persists across tbody elements: a newline is
# emitted after every 6th cell overall, turning the flat td stream into rows.
cells_written = 0
with open('gupiaoInfo_byBS4.txt', 'a', encoding='utf-8') as outfile:
    for tbody in detail_info:
        for cell in tbody.find_all('td'):
            cells_written += 1
            outfile.write(cell.string + '\t\t\t')
            if cells_written == 6:
                outfile.write('\n')
                cells_written = 0

# Build the soup from an open file object; the with-block closes the handle
# (the original opened f_sanguo and never closed it).
with open('D:\\BaiduNetdiskDownload\\爬虫从入门到应用\\2022_06_10_直播数据解析_bs4\\素材\\三国演义.html', 'r', encoding='utf-8') as f_sanguo:
    soup_sanguo = BeautifulSoup(f_sanguo, 'lxml')

# Chapter links sit inside <div class="book-mulu">.
sanguo_mulu = soup_sanguo.find_all('div', class_='book-mulu')
with open('三国章节.html', 'w', encoding='utf-8') as fff:
    for k in sanguo_mulu:
        for episode in k.find_all('a'):     # every <a> under the div is one chapter
            # episode is a Tag — str() serializes its markup. '\n' keeps the file
            # readable as source; <br> is what produces the line break in a browser.
            fff.write(str(episode) + '\n' + '<br>')

# with-block closes the source handle (the original opened html_tianqi and
# never closed it).
with open('D:\\BaiduNetdiskDownload\\爬虫从入门到应用\\2022_06_10_直播数据解析_bs4\\素材\\匹配天气.html', 'r', encoding='utf-8') as html_tianqi:
    soup_tianqi = BeautifulSoup(html_tianqi, 'lxml')
# keyword + regex + multi-attribute search; raw string avoids the '\d'
# invalid-escape warning.
citys = soup_tianqi.find_all(href=re.compile(r'http://www.weather.com.cn/weather/\d+.shtml'), target='_blank')
# Attribute values are matched as strings, not ints — hence width='92'/'86'.
highs = soup_tianqi.find_all('td', width='92')
lows = soup_tianqi.find_all('td', width='86')

myCitys = []
myHighs = []
myLows  = []

# Drop the spurious matches and rebuild clean column lists with header labels.
for city in citys:
    if city.string != '详情':       # skip the "details" link that shares the href pattern
        myCitys.append(city.string)
myCitys.insert(0, '城市')

for high in highs:
    if high.string.isdigit():       # keep only numeric temperature cells
        myHighs.append(high.string)
myHighs.insert(0, '最高')

for low in lows:
    if low.string.isdigit():
        myLows.append(low.string)
myLows.insert(0, '最低')

with open('城市天气.txt', 'w', encoding='utf-8') as f_tianqi:
    for i in range(0, len(myCitys)):
        if i == 1:
            # separator line between the header row and the data rows
            f_tianqi.write('============================\n')
        f_tianqi.write(myCitys[i] + '\t\t\t' + myHighs[i] + '\t\t\t' + myLows[i] + '\n')

# def get_line_num():
#     return 'line:' + str(sys._getframe().f_lineno)
#
# # def print_line():
# #     line_num = inspect.currentframe().f_lineno
# #     print(f'now line: {line_num}')      # f格式化字符串，{}内传入字符串
#
# html_doc = """
# <html><head><title>The Dormouse's story</title></head>
# <body>
# <p class="title"><b>The Dormouse's story</b></p>
#
# <p class="story">Once upon a time there were three little sisters; and their names were
# <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
# <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
# <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
# and they lived at the bottom of a well.</p>
#
# <p class="story">...</p>
# """
#
# # f = open('D:\\BaiduNetdiskDownload\\爬虫从入门到应用\\2022_06_10_直播数据解析_bs4\\素材\\大学排名.html', 'r', encoding='utf-8')
# # htmlStr = f.read()
# # f.close()
#
# soup = BeautifulSoup(html_doc, 'html.parser')   # 用html.parser解释器解析（下一行的lxml解析更好）
# # soup = BeautifulSoup(html_doc, 'lxml')   # 用lxml解释器解析，比html.parser更好
#
# # print(soup.get_text())      # 获取所有文字内容
# # 获取超链接
# # print(soup.a)     # 返回第一个a标签
# # for link in soup.find_all('a'):     # find_all返回所有
# #     # print(link)
# #     print(link.get('href'))
# #     print(link.string)
#
# # print(soup.head, type(soup.head))
# # print(soup.head.string)     # 和36行一样
# # print(soup.title, type(soup.title))
# # print(soup.title.string)
# # print(soup.p)
# print(soup.a.string)
# print(soup.p.text, str(sys._getframe().f_lineno))
# # print(soup.p['class'], type(soup.p['class']))      # p标签的class属性值，返回列表
# # print(soup.p.string)    # 和53行一样
# # print(soup.p.strings)
# for i in soup.p.strings:
#     print(i, get_line_num())     # 获取行号
# # print_line()
# # print(soup.p.b)
# # print(soup.p.b.string)
# # print(soup.find_all('p')[1].string)
# for k in soup.find_all('p'):
#     print(k)        # 发现第二个人元素有多个字符串和p标签，都是子节点，.string获取不到文本内容，可以用.strings加遍历的方式
# for j in soup.find_all('p')[1].stripped_strings:    # 去除多余空白内容，\r,\n,\t, ' '
#     print(j, get_line_num())
# for item in soup.find_all('p')[1].strings:
#     print(item, get_line_num())
# print(soup.find_all('p')[1].text)       # .text和.strings加遍历一样，返回一个字符串
# print(len(soup.find_all('p')[1].contents))      # .contents以列表的形式返回所有子节点，可通过获取.contents的长度的方式判断一个tag有没有子节点
# ''' 小结：1).string会把左右标签都过滤掉，直接获取该标签的文字部分
#          2)有子节点，.string获取不到文本内容，可以用.strings加遍历
#          3).text和.strings加遍历效果一样，.text用得更多
# '''
#
# print(soup.a.attrs, type(soup.a.attrs))     # 第一个超链接的所有属性，也就是tag的属性，返回字典
# for keyValue in soup.a.attrs:
#     print(soup.a.attrs[keyValue])
#
# # tag的属性操作和字典一样
# # 修改
# soup.a['class'] = ['bro']
# print(soup.a.attrs, type(soup.a.attrs))
# for keyValue in soup.a.attrs:
#     print(soup.a.attrs[keyValue])
#
# # 删除
# del soup.a['class']
# print(soup.a.attrs, type(soup.a.attrs))
# for keyValue in soup.a.attrs:
#     print(soup.a.attrs[keyValue])
#
# # 添加
# soup.a['myKey'] = '小英是笨蛋'
# print(soup.a.attrs, type(soup.a.attrs))
# for keyValue in soup.a.attrs:
#     print(soup.a.attrs[keyValue])
#
# soup.a.string = soup.a.attrs['myKey']
#
# with open('myHtml.html', 'w', encoding='utf-8') as f:
#     f.write(str(soup))
# f.close()
#
# html_test = '<a href="http://example.com/elsie" class="sister" id="link1"><!--Elsie--></a>'
# soup2 = BeautifulSoup(html_test, 'lxml')
# print(soup2.a.string, type(soup2.a.string))     # <!-- ***-->是注释，用html.parser时内容前要有空格，用lxml时不需要，但.string获取时自动去掉了注释，类型为comment，也就是注释
#
# print(soup.find_all(['a','p']))
# print(soup.find_all(['a','p'])[0])
# print(soup.find_all(href=re.compile('Lacie')))
# print(soup.find_all(href=re.compile('http://'))[0])
# print(soup.find_all(href=re.compile('^http://')))
# print(soup.find_all(href=re.compile('^http://'), id='link1'))   # 多属性查询
# print(soup.find_all(string="Lacie"))        # 为什么用text作为keyword不行？
# print(soup.select("title"))
