# import requests
# base_url='https://www.baidu.com/'

# response =requests.get(url=base_url)
# print(response.status_code)

#第一种


# import requests

# base_url='https://www.baidu.com/s'

# param='wd =python' 

# full_url=base_url +'?'+param

# reponse = requests.get(full_url)
# print(reponse.status_code)
#第二种

# import requests

# base_url='https://www.baidu.com/s'

# param={'wd' :'python'} 

# full_url=base_url +'?'+param

# reponse = requests.get(base_url,params=wd_param)
# print(reponse.status_code)
#第三种（我没有写完）



# import requests
# base_url='https://www.baidu.com/'

# response =requests.get(url=base_url)

# response.encoding='utf-8'
# print(response.status_code)



#获取照片
# import requests

# base_url="https://www.cdutetc.cn/site/cms_22/upload/image/20241011/1728610802537082076.jpg"
# response=requests.get(base_url)

# with open ('cdu_logo.png','wb') as file:#wb模式获取的是二进制数据
#     file.write(response._content)










#第一种方式
# import requests

# headers = {
#     'Cookie': 'C1805EC30AC172297F3FE0CB6E9E50D7:FG=1',      # 设置字段Cookie
#     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4)' 
#                   'AppleWebKit/537.36 (KHTML, like Gecko)'   
#                   'Chrome/53.0.2785.116 Safari/537.36',} # 设置字段User-Agent
# response=requests.get('https://www.baidu.com/',headers=headers)
# print(response.text)


#第二种方式cookies在外面
# import requests
# import requests.cookies

# headers = {
#           # 设置字段Cookie
#     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4)' 
#                   'AppleWebKit/537.36 (KHTML, like Gecko)'   
#                   'Chrome/53.0.2785.116 Safari/537.36',} # 设置字段User-Agent
# Cookie ='C1805EC30AC172297F3FE0CB6E9E50D7:FG=1'
# jar_obj=requests.cookies.RequestsCookieJar()

# for temp in Cookie.split(';'):
#     key,value= temp.split('=',1)
#     jar_obj.set(key,value)

# response=requests.get('https://www.baidu.com/',headers=headers,cookies=jar_obj)
# print(response.text)








#爬取小说此代码有问题

# import requests
# import os

# path = "./html"

# def load_page(url):
#     '''
#     作用：根据url发送请求，获取服务器响应文件
#     url:需要根据爬取的url地址
#     '''
#     headers ={"User-Agent":"Mozilla/5.0(compatible;MSIE 9.0;Windows NT 6.1;Trident / 5.0;)"}
#     request =requests.get(url,headers=headers)
#     return request.text
# def save_file(html,filename):
#     '''
#     作用：将html内容写入本地文件
#     html:服务器相应文件内容
#     '''
#     print("正在保存"+filename)
#     with open(os.path.join(path,filename),'w',encoding='utf-8') as file:
#         file.write(html)
# def get_html(begin_page,end_page):
#     '''
#     url:小王子的url
#     begin_page:起始页码
#     end_page:结束页

    
#     '''

#     for page in range(begin_page,end_page+1):
#         url=f'https://xiaoshuo.qq.com/read/1047950893/{page}'    
#         file_name ="第"+str(page) +"页.html"
#         html =load_page(url)
#         save_file(html,file_name)

# if __name__ == "__main__":
#     begin_page= int(input("请输入起始页："))
#     end_page =int(input("请输入结束页："))
#     get_html(begin_page,end_page)








#正确的爬取小说


# import requests  
# import os  
  
# path = "./html"  
  
# def ensure_directory_exists(directory):  
#     """确保目录存在，如果不存在则创建"""  
#     if not os.path.exists(directory):  
#         os.makedirs(directory)  
  
# def load_page(url):  
#     '''  
#     作用：根据url发送请求，获取服务器响应文件  
#     url:需要根据爬取的url地址  
#     '''  
#     headers = {"User-Agent": "Mozilla/5.0(compatible;MSIE 9.0;Windows NT 6.1;Trident / 5.0;)"}  
#     request = requests.get(url, headers=headers)  
#     return request.text  
  
# def save_file(html, filename):  
#     '''  
#     作用：将html内容写入本地文件  
#     html:服务器响应文件内容  
#     '''  
#     print("正在保存" + filename)  
#     full_path = os.path.join(path, filename)  
#     with open(full_path, 'w', encoding='utf-8') as file:  
#         file.write(html)  
  
# def get_html(begin_page, end_page):  
#     '''  
#     url:小王子的url  
#     begin_page:起始页码  
#     end_page:结束页  
#     '''  
#     ensure_directory_exists(path)  # 确保目录存在  
#     for page in range(begin_page, end_page + 1):  
#         url = f'https://xiaoshuo.qq.com/read/1047950893/{page}'      
#         file_name = "第" + str(page) + "页.html"  # 修正文件扩展名  
#         html = load_page(url)  
#         save_file(html, file_name)  
  
# if __name__ == "__main__":  
#     begin_page = int(input("请输入起始页："))  
#     end_page = int(input("请输入结束页："))  
#     get_html(begin_page, end_page)





#下面几个都是查找，解析
#正则方法
#import re
# html_content = """  
# <html>  
# <head><title>Example</title></head>  
# <body>  
#     <h1>This is a heading</h1>  
#     <p>This is a paragraph.</p>  
#     <h1>Another heading</h1>  
# </body>  
# </html>  
# """

# pattern = r'<h1>(.*?)</h1>'

# matches = re.findall(pattern,html_content,re.DOTALL)

# for match in matches:
#     print(match)



# from lxml import etree
# xml_doc = '''
# <bookstore>
#     <book>
#         <title lang="eng">Harry Potter</title>
#         <price>29.99</price>
#     </book>
#     <book>
#         <title lang="eng">Learning XML</title>
#         <price>39.95</price>
#     </book>
# </bookstore>
# '''
# root_node =etree.fromstring(xml_doc)
# print(root_node)
# print(root_node[:])#获取子节点
# print(root_node[0])#获取第一个子节点
# print(root_node[1])
# print(root_node.tag)


# res1 =root_node.find('.//price').text
# print(res1)
# res2 =root_node.findall('.//price')[0].text
# print(res2)
# res3 =root_node.findall('.//price')[0].text
# print(res3)



#BS4
# from bs4 import BeautifulSoup
# import re
# html_doc = """<html><head><title>The Dormouse's story</title></head>
# <body>
# <p class="title"><b>The Dormouse's story</b></p>
# <p class="story">Once upon a time there were three little sisters; 
# and their names were
# <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
# <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
# <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
# and they lived at the bottom of a well.</p>
# <p class="story">...</p>
# """
# suop= BeautifulSoup(html_doc,features='lxml')

# print(suop.find_all('title'))

# print(suop.find_all("title","a"))#

# print(suop.find_all(text="Elsie"))#

# from wordcloud import WordCloud
# import matplotlib.pyplot as plt








#生成词云


# with open("D:\VScode_work\html\第3页.html", "r", encoding="utf-8") as f:
#     text = f.read()
# wc = WordCloud(font_path="D:\VScode_work\网络爬虫\STXINGKA.TTF", background_color="white", \
#                width=500, height=400, margin=2).generate(text)

# fig, ax = plt.subplots(1, 1, figsize=(17, 12))
# ax.imshow(wc)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["left"].set_visible(False)
# ax.spines["bottom"].set_visible(False)

# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
# plt.show()
# wc.to_file("./pattern/wc.png")




#用正则来查找中文字，并生成词云

import os
import re

from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Read the previously downloaded HTML page (saved by the crawler above).
with open("html/第3页.html", "r", encoding="utf-8") as f:
    html_content = f.read()

# Extract runs of Chinese characters (CJK Unified Ideographs block,
# U+4E00–U+9FFF) and join them with spaces so WordCloud can tokenize.
chinese_text = ' '.join(re.findall(r'[\u4e00-\u9fff]+', html_content))

# Build the word cloud. font_path must point to a font that contains
# CJK glyphs (SimHei here), otherwise Chinese text renders as boxes.
wc = WordCloud(font_path="simhei.ttf",
               background_color="white",
               width=500,
               height=400,
               margin=2).generate(chinese_text)

# Save the image BEFORE showing the figure: plt.show() blocks until the
# window is closed, and saving first guarantees the file exists even if
# the process is interrupted at the window.
os.makedirs("./pattern", exist_ok=True)  # to_file() raises FileNotFoundError if the directory is missing
wc.to_file("./pattern/wc.png")

# Display the word cloud with all axes decoration hidden.
fig, ax = plt.subplots(1, 1, figsize=(17, 12))
ax.imshow(wc)
ax.axis("off")  # hides spines, ticks and labels in one call
plt.show()