# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 13:23:27 2024

@author: 32691
"""

from bs4 import   BeautifulSoup
import requests
import pandas as pd
import re
from openpyxl import Workbook


# Read the three target URLs (one per line) from info.txt.
# encoding is given explicitly so the read does not depend on the
# platform's default codec.
with open('info.txt', 'r', encoding='utf-8') as file:
    lines = file.readlines()
    # Fail with a clear message instead of a bare IndexError when the
    # file is too short.
    if len(lines) < 3:
        raise SystemExit('info.txt must contain at least 3 URLs, one per line')
    first_url = lines[0].strip()
    second_url = lines[1].strip()
    third_url = lines[2].strip()
    print(first_url)
    print(second_url)
    print(third_url)
    
# One workbook holding three sheets, one per scraped page.
workbook = Workbook()
first_sheet = workbook.active
second_sheet = workbook.create_sheet(title="宋允全")
third_sheet = workbook.create_sheet(title="GDP")
# Rename the default active sheet after the others are created;
# sheet order is unaffected (the active sheet stays first).
first_sheet.title = "陈华"

# Browser-like User-Agent shared by all three requests.
_USER_AGENT = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0"
)


def _fetch(url, cookie, timeout=30):
    """GET *url* with a browser-like header and return (session, response).

    A timeout is set so a dead server cannot hang the script forever.
    The response encoding is forced to utf-8 because the target pages
    are Chinese and may not declare their charset correctly.
    """
    headers = {"User-Agent": _USER_AGENT, "Cookie": cookie}
    sess = requests.session()
    response = sess.get(url, headers=headers, timeout=timeout)
    response.encoding = 'utf-8'
    return sess, response


# Fetch the three pages; each site needs its own session cookie.
first_sess, first_response = _fetch(
    first_url, "JSESSIONID=29DB1B37459E74837C14147FC7C34F3E")
second_sess, second_response = _fetch(
    second_url, "JSESSIONID=F11B0C675DCCCDBBC24F837A5577F6E4")
third_sess, third_response = _fetch(
    third_url,
    "Hm_lvt_bbb87091aa9f5e84eda2a6042eb4842b=1712672452; Hm_lpvt_bbb87091aa9f5e84eda2a6042eb4842b=1712674938; Hm_lvt_e6412cfc059a1c0a7d30b1915c762c86=1712672452; Hm_lpvt_e6412cfc059a1c0a7d30b1915c762c86=1712674938")

# Abort if any of the three pages failed to download. SystemExit(1)
# signals failure to the caller; the original bare exit() would have
# reported success (status 0) even on error.
for _resp in (first_response, second_response, third_response):
    if _resp.status_code != 200:
        print("Failed to get the page:", _resp.status_code)
        raise SystemExit(1)
    
    
    
# --- Page 1: copy the first 'MsoNormalTable' table into sheet "陈华" ---

# Parse HTML page 1.
first_soup = BeautifulSoup(first_response.text, 'html.parser')

# Locate the target table; fail with a clear message instead of the
# cryptic AttributeError a None result would otherwise cause below.
specific_table = first_soup.find('table', class_='MsoNormalTable')
if specific_table is None:
    raise SystemExit("Table with class 'MsoNormalTable' not found on page 1")

# Mirror the table into Excel cell by cell; openpyxl rows/columns are
# 1-based, hence start=1.
first_tr_tags = specific_table.find_all('tr')
for first_row_index, first_tr in enumerate(first_tr_tags, start=1):
    first_td_tags = first_tr.find_all(['td', 'th'])
    for first_col_index, first_td in enumerate(first_td_tags, start=1):
        first_sheet.cell(row=first_row_index, column=first_col_index,
                         value=first_td.get_text())
        
        
# --- Page 2: copy the first 'article' table into sheet "宋允全" ---

# Parse HTML page 2.
second_soup = BeautifulSoup(second_response.text, 'html.parser')

# Locate the target table; fail with a clear message instead of the
# cryptic AttributeError a None result would otherwise cause below.
second_specific_table = second_soup.find('table', class_='article')
if second_specific_table is None:
    raise SystemExit("Table with class 'article' not found on page 2")

# Mirror the table into Excel cell by cell; openpyxl rows/columns are
# 1-based, hence start=1.
second_tr_tags = second_specific_table.find_all('tr')
for second_row_index, second_tr in enumerate(second_tr_tags, start=1):
    second_td_tags = second_tr.find_all(['td', 'th'])
    for second_col_index, second_td in enumerate(second_td_tags, start=1):
        second_sheet.cell(row=second_row_index, column=second_col_index,
                          value=second_td.get_text())
        
# --- Page 3: copy the first 'table-ui' table into sheet "GDP" ---

# Parse HTML page 3.
third_soup = BeautifulSoup(third_response.text, 'html.parser')

# Locate the target table; fail with a clear message instead of the
# cryptic AttributeError a None result would otherwise cause below.
third_specific_table = third_soup.find('table', class_='table-ui')
if third_specific_table is None:
    raise SystemExit("Table with class 'table-ui' not found on page 3")

# Mirror the table into Excel cell by cell; openpyxl rows/columns are
# 1-based, hence start=1.
third_tr_tags = third_specific_table.find_all('tr')
for third_row_index, third_tr in enumerate(third_tr_tags, start=1):
    third_td_tags = third_tr.find_all(['td', 'th'])
    for third_col_index, third_td in enumerate(third_td_tags, start=1):
        third_sheet.cell(row=third_row_index, column=third_col_index,
                         value=third_td.get_text())
        
        
# Save the workbook. Use the variable (not a second hard-coded literal)
# so the confirmation message always matches the file actually written.
excel_file = 'write.xlsx'
workbook.save(excel_file)

# Report where the scraped data ended up.
print(f'Data has been written to {excel_file}')
        




























