import pymysql
import requests
from bs4 import BeautifulSoup
class DBTool():
    """Thin helper around a pymysql connection for the `soft_journal` table.

    Owns one connection and one cursor for the lifetime of the object.
    Call `close()` when finished to release them.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider moving them to
        # environment variables or a config file.
        # 'utf8' is MySQL's 3-byte subset — presumably sufficient for the BMP
        # Chinese text stored here; use 'utf8mb4' if supplementary characters
        # (e.g. emoji) are ever expected. TODO confirm against table charset.
        self.conn = pymysql.connect(
            host='localhost',
            database="journal_management_system",
            user='root',
            password='123456',
            charset='utf8'
        )
        self.cursor = self.conn.cursor()

    # Query all records
    def queryAll(self):
        """Return every row of soft_journal as a tuple of row tuples."""
        self.cursor.execute('SELECT * FROM soft_journal')
        return self.cursor.fetchall()

    # Query a single record by title
    def queryOne(self, title):
        """Return the first row whose title equals `title`, or None."""
        self.cursor.execute('SELECT * FROM soft_journal WHERE title=%s', (title,))
        return self.cursor.fetchone()

    # Insert one record
    def insert(self, title, author, abstract, pdfurl, pubdate):
        """Insert one record; return True on success, False on failure."""
        flag = False
        try:
            sql = 'INSERT INTO soft_journal (title, author, abstract, pdfurl, pubdate) VALUES (%s, %s, %s, %s, %s)'
            self.cursor.execute(sql, (title, author, abstract, pdfurl, pubdate))
            self.conn.commit()
            flag = True
        except Exception as e:
            # Roll back so a failed INSERT does not leave this connection
            # stuck in an aborted transaction for subsequent statements.
            self.conn.rollback()
            print("Error during insert:", e)
        return flag

    def close(self):
        """Release the cursor and connection (new, backward-compatible)."""
        try:
            self.cursor.close()
        finally:
            self.conn.close()

# 爬取网页函数，获取中文信息（通用函数）
# Generic scraper: fetch a page and extract Chinese text via a CSS selector.
def scrawler_cn(url, selector):
    """Return the cleaned text of the first element matching `selector`.

    Non-breaking spaces and newlines are removed from the extracted text.
    Returns "" if the HTTP request fails, and "无相关信息" if the page loads
    but nothing matches the selector.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=request_headers, timeout=10)
        response.raise_for_status()  # surface 4xx/5xx as exceptions
    except requests.exceptions.RequestException as err:
        print(f"Error fetching the URL: {err}")
        return ""

    # Parse the HTML and locate the requested element.
    page = BeautifulSoup(response.text, 'html.parser')
    node = page.select_one(selector)
    if node is None:
        return "无相关信息"

    cleaned = node.get_text(strip=True)
    return cleaned.replace('\xa0', '').replace('\n', '')

# 专门爬取作者的函数
# Dedicated scraper for the author list of a paper page.
def scrawler_authors(url):
    """Return the paper's authors as a comma-separated string.

    Authors are read from the <span> children of the first
    <p data-toggle="collapse"> element. Returns "" if the HTTP request
    fails, and "未知作者" when no author section is found.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=request_headers, timeout=10)
        response.raise_for_status()  # surface 4xx/5xx as exceptions
    except requests.exceptions.RequestException as err:
        print(f"Error fetching the URL: {err}")
        return ""

    # Parse the HTML and locate the author section.
    page = BeautifulSoup(response.text, 'html.parser')
    section = page.find('p', {'data-toggle': 'collapse'})
    if section is None:
        return "未知作者"

    names = []
    for span in section.find_all('span'):
        text = span.text.strip()
        if text:
            names.append(text)
    return ", ".join(names)

# 主程序
# Entry point: scrape each paper page and store the results in MySQL.
if __name__ == '__main__':
    # Database helper object
    dbTool = DBTool()
    # List of papers to scrape; pubdate is supplied manually per entry.
    papers = [
        {"url": "http://fcst.ceaj.org/CN/10.3778/j.issn.1673-9418.2402004", "pubdate": "2023-7-5"}
        # Add more URLs...
    ]
    try:
        # Scrape each paper and insert it into the database.
        for paper in papers:
            url = paper["url"]
            pubdate = paper["pubdate"]
            title = scrawler_cn(url, "h3.abs-tit")  # Chinese title
            # Fix: scrawler_cn returns "" when the request fails — previously
            # such failures were still inserted as empty rows. Skip them.
            if not title:
                print(f"Skipped (fetch failed): {url}")
                continue
            authors = scrawler_authors(url)  # authors
            abstract = scrawler_cn(url, "div.panel-body.line-height.text-justify p")  # Chinese abstract
            pdfurl = url  # use the page URL as the PDF link
            if dbTool.insert(title, authors, abstract, pdfurl, pubdate):
                print(f"Inserted: {title}")
            else:
                print(f"Failed to insert: {title}")

        # Print every stored record.
        all_records = dbTool.queryAll()
        for record in all_records:
            print(record)
    finally:
        # Fix: the connection was previously never released. Close via the
        # attributes DBTool already exposes.
        dbTool.cursor.close()
        dbTool.conn.close()

