#!/usr/bin/env python
# coding: utf-8

# In[7]:


get_ipython().system('pip install bs4')


# In[18]:


import pandas as pd
import requests
from bs4 import BeautifulSoup
from lxml import etree


# In[27]:


# 请求头
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    'Cookie': 'Hm_lvt_7069209d76184c3513ce3df5e48fdbd6=1711709747; Hm_lpvt_7069209d76184c3513ce3df5e48fdbd6=1711709769',
    'Host': 'www.bq90.cc',
    'Connection': 'keep-alive'
}

# 读取网址文件
with open('url.txt', 'r') as file:
    urls = file.readlines()

# 去除每行末尾的换行符
urls = [url.strip() for url in urls]

# 创建Excel写入对象
writer = pd.ExcelWriter('data.xlsx')


# In[28]:


# 遍历每个网址
for idx, url in enumerate(urls):
    print(f"Fetching data from URL {idx + 1}: {url}")
    
    # 发送GET请求获取网页内容
    resp = requests.get(url, headers)
    
    # 使用BeautifulSoup解析网页内容
    soup = BeautifulSoup(resp.content, 'html.parser')
    text = resp.content.decode('utf-8')
    html = etree.HTML(text)
    title = html.xpath('//*[@class="title"]/text()')[0]
    contents = html.xpath('//*[@id="chaptercontent"]/text()')
    
    # 获取网页标题作为sheet名称
    sheet_name = soup.title.string.strip() if soup.title else f'Sheet{idx + 1}'
    
    # 将网页文本内容写入Excel的一个sheet中
    df = pd.DataFrame({'Text': contents})
    df.to_excel(writer, sheet_name=sheet_name, index=False)

# 保存Excel文件
writer.save()
print("Data saved to Excel successfully.")


# In[ ]:




