import logging
import re

import requests
from bs4 import BeautifulSoup
import pymysql
from pymysql import MySQLError

# Configure root logger: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Target article page to scrape.
url = 'http://tougao.xsbja.com/index.php?s=&c=content&a=issue&id=4531&key=8'

# Database connection settings for insert_into_db().
# NOTE(review): despite the original comment ("load from environment variables
# or another secure source"), credentials are hard-coded here — consider
# actually reading them from the environment before deploying.
db_config = {
    'user': 'root',
    'password': '1',
    'host': 'localhost',  # or your database server's address
    'database': 'journal',
    'charset': 'utf8mb4',    # character set (full Unicode, incl. CJK)
    'cursorclass': pymysql.cursors.DictCursor  # rows returned as dicts
}

def fetch_and_parse(url):
    """Download *url* and return the page parsed as a BeautifulSoup tree.

    Logs and re-raises requests.RequestException on any network/HTTP failure.
    """
    try:
        # 10-second timeout so a hung server cannot stall the script forever.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException as e:
        logging.error(f"请求失败: {e}")
        raise
    else:
        return BeautifulSoup(response.text, 'html.parser')

def extract_title(soup):
    """Return the article title from the page's ``div.wzbx`` element.

    Falls back to the placeholder string '标题未找到' (and logs a warning)
    when the element is absent.
    """
    node = soup.find('div', class_='wzbx')
    if node:
        return node.get_text(strip=True)
    logging.warning("未找到标题")
    return '标题未找到'

def extract_abstract(soup):
    """Return the abstract text from the styled <span> that carries it.

    The abstract span is identified by its inline style (white background
    and 14px font). Returns '摘要未找到' and logs a warning when no such
    span exists.
    """
    def _looks_like_abstract(style):
        # Match only spans whose inline style has both markers.
        return bool(style) and 'background-color:#FFFFFF;' in style and 'font-size:14px;' in style

    span = soup.find('span', style=_looks_like_abstract)
    if span is None:
        logging.warning("未找到摘要")
        return '摘要未找到'
    text = span.get_text(strip=True)
    # Strip any leading "提示:" boilerplate, keeping only the final part.
    pieces = text.split('提示:')
    if len(pieces) > 1:
        return pieces[-1].strip()
    return text

def extract_keywords(soup):
    """Return the keyword list from the second ``div.wzzy`` element.

    Expects the div to contain a ``<b>关键词：</b>`` label followed by the
    keywords. Returns ['关键词未找到'] (with a warning logged) whenever the
    expected structure is missing.
    """
    keywords_divs = soup.find_all('div', class_='wzzy')
    if len(keywords_divs) < 2:
        logging.warning("未找到关键词")
        return ['关键词未找到']
    keywords_div = keywords_divs[1]
    b_tag = keywords_div.find('b')
    if not b_tag or b_tag.get_text(strip=True) != '关键词：':
        logging.warning("关键词格式不符合预期")
        return ['关键词未找到']
    keywords_text = keywords_div.get_text().split('关键词：')[-1].strip()
    # The original split only on ASCII ','; Chinese pages commonly use the
    # fullwidth comma/semicolon or enumeration comma instead. Splitting on
    # all of them is backward compatible with the ','-only behavior.
    return [kw.strip() for kw in re.split(r'[,，;；、]', keywords_text) if kw.strip()]

def extract_authors(soup):
    """Return the author list from the first ``div.wzzy`` mentioning 作者.

    Walks sibling ``div.wzzy`` elements until one contains '作者', then
    splits the text after the '作者：' label. Returns ['作者未找到'] (with a
    warning logged) when no such div exists or the text is empty.
    """
    author_div = soup.find('div', class_='wzzy')
    # Advance through sibling div.wzzy elements until one mentions 作者.
    while author_div and '作者' not in author_div.get_text(strip=True):
        author_div = author_div.find_next_sibling('div', class_='wzzy')
    if not author_div:
        logging.warning("未找到作者")
        return ['作者未找到']
    authors_text = author_div.get_text(strip=True).split('作者：')[-1].strip()
    if not authors_text:
        logging.warning("作者信息为空")
        return ['作者未找到']
    # Accept fullwidth commas/semicolons and the enumeration comma as
    # separators, matching extract_keywords; backward compatible with the
    # original ','-only split.
    return [author.strip() for author in re.split(r'[,，;；、]', authors_text) if author.strip()]

def insert_into_db(title, abstract, keywords, authors, db_config):
    """Insert one scraped paper record into the ``paper1`` table.

    keywords and authors (lists of str) are stored comma-joined. Errors are
    logged and swallowed; the transaction is rolled back on failure and the
    connection is always closed.
    """
    # Bug fix: if pymysql.connect() raised, `connection` was never bound and
    # the except/finally clauses crashed with NameError/UnboundLocalError,
    # masking the real connection error. Initialize to None and guard.
    connection = None
    try:
        connection = pymysql.connect(**db_config)
        with connection.cursor() as cursor:
            # Parameterized statement — values are escaped by the driver.
            sql = """
                INSERT INTO paper1 (title, abstract, keywords, authors)
                VALUES (%s, %s, %s, %s)
            """
            logging.info("准备插入数据到数据库")
            cursor.execute(sql, (title, abstract, ', '.join(keywords), ', '.join(authors)))
            connection.commit()
            logging.info("数据成功插入数据库")
    except MySQLError as err:
        logging.error(f"MySQL 错误: {err}")
        if connection is not None:
            connection.rollback()
    except Exception as e:
        logging.error(f"其他错误: {e}")
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None and connection.open:
            connection.close()

if __name__ == "__main__":
    try:
        # Fetch the page once, then pull each field from the parsed tree.
        soup = fetch_and_parse(url)
        title = extract_title(soup)
        abstract = extract_abstract(soup)
        keywords = extract_keywords(soup)
        authors = extract_authors(soup)

        # Echo the scraped fields to stdout before persisting them.
        print(f"标题: {title}")
        print(f"摘要: {abstract}")
        print(f"关键词: {', '.join(keywords)}")
        print(f"作者: {', '.join(authors) if isinstance(authors, list) else authors}")

        insert_into_db(title, abstract, keywords, authors, db_config)
    except Exception as e:
        # Top-level boundary: log any failure rather than exiting with a traceback.
        logging.error(f"程序运行时发生错误: {e}")



