# Part 1: weather-data scraper (disabled).
#
# NOTE(review): everything below sits inside a module-level triple-quoted
# string, so it is never executed. If this draft is ever revived, note a
# dormant bug: the "other days" rows append 9 fields (a trailing "") while
# the CSV header row has only 8 columns — drop the trailing element.

'''
import requests
from bs4 import BeautifulSoup
import csv


def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # 检查请求状态
        r.encoding = r.apparent_encoding
        return r.text
    except Exception as e:
        print(f"请求错误: {e}")
        return None


def getWeatherData(text):
    soup = BeautifulSoup(text, 'html.parser')

    # 提取当天的数据
    today_item = soup.find('div', class_='pull-left day actived')
    weather_data = []

    # 处理当天数据
    if today_item:
        try:
            date_item = today_item.find('div', class_='day-item').get_text(strip=True)
            weather_condition = today_item.find_all('div', class_='day-item')[2].get_text(strip=True)
            wind_direction = today_item.find_all('div', class_='day-item')[3].get_text(strip=True)
            wind_speed = today_item.find_all('div', class_='day-item')[4].get_text(strip=True)
            high_temp = today_item.find('div', class_='high').get_text(strip=True)
            low_temp = today_item.find('div', class_='low').get_text(strip=True)
            day_icon = today_item.find('div', class_='dayicon').img['src'] if today_item.find('div',
                                                                                              class_='dayicon') else ""
            night_icon = today_item.find('div', class_='nighticon').img['src'] if today_item.find('div',
                                                                                                  class_='nighticon') else ""

            weather_data.append([
                date_item,
                weather_condition,
                high_temp,
                low_temp,
                wind_direction,
                wind_speed,
                day_icon,
                night_icon
            ])
        except Exception as e:
            print(f"当天数据解析错误: {e}")

    # 提取其余数据
    other_days = soup.find_all('div', class_='pull-left day')
    for item in other_days:
        if "actived" in item.get('class', []):
            continue  # 跳过已处理的当天数据

        try:
            date_item = item.find('div', class_='day-item').get_text(strip=True)
            weather_condition = item.find_all('div', class_='day-item')[2].get_text(strip=True)
            wind_direction = item.find_all('div', class_='day-item')[3].get_text(strip=True)
            wind_speed = item.find_all('div', class_='day-item')[4].get_text(strip=True)
            high_temp = item.find('div', class_='high').get_text(strip=True)
            low_temp = item.find('div', class_='low').get_text(strip=True)
            day_icon = item.find('div', class_='dayicon').img['src'] if item.find('div', class_='dayicon') else ""
            night_icon = item.find('div', class_='nighticon').img['src'] if item.find('div', class_='nighticon') else ""

            weather_data.append([
                date_item,
                weather_condition,
                high_temp,
                low_temp,
                wind_direction,
                wind_speed,
                day_icon,
                night_icon,
                ""
            ])

        except Exception as e:
            print(f"数据解析错误: {e}")

    return weather_data


def saveToCSV(data, filename='weatherdata.csv'):
    headers = ["日期", "天气", "最高温（℃）", "最低温（℃）", "风向", "风速", "白天图标", "夜间图标"]

    with open(filename, 'w', newline='', encoding='gbk') as f:
        writer = csv.writer(f)
        writer.writerow(headers)
        writer.writerows(data)


if __name__ == "__main__":
    url = "https://weather.cma.cn/web/weather/54525.html"  # 替换为你的URL
    text = getHTMLText(url)

    weather_data = getWeatherData(text)

    if weather_data:
        saveToCSV(weather_data)
        print("天气数据已成功保存到 weatherdata.csv 文件中，包含当天数据。")
'''


# Part 2: scrape the ShanghaiRanking 2024 "Best Chinese Subjects" pages.

import requests
from bs4 import BeautifulSoup
import time
import urllib.parse
import csv
import logging

# Configure the root logger so INFO-level progress messages are shown.
logging.basicConfig(level=logging.INFO)

# Base URL of the ranking index; per-subject hrefs scraped from the page
# are resolved against it with urllib.parse.urljoin.
base_url = 'https://www.shanghairanking.cn/rankings/bcsr/2024'


def fetch_university_rankings(sub_url):
    """Download one subject's ranking page and extract its entries.

    Parameters:
        sub_url: href taken from the subject index page; resolved
            against the module-level ``base_url``.

    Returns:
        A list of ``(rank, university_name)`` string tuples. Empty when
        the request fails (error is logged) or the page contains no
        recognizable ranking rows (warning is logged).
    """
    full_url = urllib.parse.urljoin(base_url, sub_url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    rankings = []

    try:
        response = requests.get(full_url, headers=headers, timeout=10)
        response.raise_for_status()
    except requests.RequestException as e:
        logging.error(f"请求失败：{full_url}，错误信息：{e}")
        return rankings

    soup = BeautifulSoup(response.content, 'html.parser')
    # A table row is kept only when it carries both a rank cell and a
    # Chinese-name cell; header/filler rows lack one of the two.
    for row in soup.find_all('tr'):
        rank_cell = row.select_one('.ranking')
        name_cell = row.select_one('.name-cn')
        if rank_cell and name_cell:
            rankings.append((rank_cell.get_text(strip=True),
                             name_cell.get_text(strip=True)))

    if not rankings:
        logging.warning(f"未找到排名信息：{full_url}")

    return rankings


def _write_subject_csv(subject_data):
    """Flatten the nested subject structure and write it to subject.csv."""
    # NOTE(review): written in GBK to preserve the original output encoding;
    # characters outside GBK would raise UnicodeEncodeError — consider
    # utf-8-sig if non-Chinese names can ever appear.
    with open("subject.csv", mode='w', encoding='gbk', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(
            ["Category Code", "Category Name", "Sub Code", "Sub Name", "Sub URL", "Rank", "University Name"])
        for subject in subject_data:
            for sub in subject['subcategories']:
                for rank, university_name in sub['rankings']:
                    writer.writerow(
                        [subject['category_code'], subject['category_name'], sub['sub_code'], sub['sub_name'],
                         sub['sub_url'], rank, university_name])


def main():
    """Crawl the subject index, then every sub-subject ranking page, and
    export the combined data to subject.csv.

    Robustness fix vs. the original: a subject item missing its
    ``.subject-code`` / ``.subject-title`` node, or a sub-link with fewer
    than two <span>s, previously raised AttributeError / IndexError and
    aborted the whole crawl (losing all data collected so far). Such items
    are now skipped with a warning instead.
    """
    main_page_url = 'https://www.shanghairanking.cn/rankings/bcsr/2024'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    subject_data = []

    try:
        response = requests.get(main_page_url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        # Each .subject-item is one top-level discipline with sub-subject links.
        for subject_item in soup.select('.subject-item'):
            code_node = subject_item.select_one('.subject-code')
            title_node = subject_item.select_one('.subject-title')
            if code_node is None or title_node is None:
                # Layout changed or partial item — skip instead of crashing.
                logging.warning("学科条目缺少编号或名称，已跳过")
                continue
            category_code = code_node.get_text(strip=True)
            category_name = title_node.get_text(strip=True)

            subcategories = []
            for sub_link in subject_item.select('.subj-link'):
                spans = sub_link.find_all('span')
                if len(spans) < 2:
                    # Expected layout is [code-span, name-span]; skip otherwise.
                    logging.warning("子类链接结构异常，已跳过")
                    continue
                sub_code = spans[0].get_text(strip=True)
                sub_name = spans[1].get_text(strip=True)
                sub_url = sub_link.get('href')

                rankings = fetch_university_rankings(sub_url)
                subcategories.append({
                    'sub_code': sub_code,
                    'sub_name': sub_name,
                    'sub_url': urllib.parse.urljoin(base_url, sub_url),
                    'rankings': rankings
                })

                time.sleep(0.5)  # throttle so we do not hammer the server

            subject_data.append({
                'category_code': category_code,
                'category_name': category_name,
                'subcategories': subcategories
            })

        _write_subject_csv(subject_data)
        logging.info("数据已成功导出到 subject.csv")

    except requests.RequestException as e:
        logging.error(f"主页面请求失败，错误信息：{e}")


# Script entry point: run the full crawl-and-export pipeline.
if __name__ == "__main__":
    main()
