#爬取软件学报文章
import requests
import pymysql
from bs4 import BeautifulSoup
def scrawler(url, selector, flag):
    """Download *url* and extract the elements matched by a CSS *selector*.

    Parameters:
        url: page to fetch.
        selector: CSS selector passed to BeautifulSoup.select().
        flag: 0 -> return the text content of each matched element;
              1 -> return the 'href' attribute of each matched element.

    Returns:
        A list of strings (possibly empty). With flag=1, elements lacking
        an href yield None.

    Raises:
        requests.HTTPError: on a non-2xx response.
        ValueError: on an unsupported flag value.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}
    # Bound the request so a hung server cannot stall the scraper forever.
    r = requests.get(url, headers=headers, timeout=10)
    # Fail loudly on 4xx/5xx instead of silently parsing an error page.
    r.raise_for_status()
    bs = BeautifulSoup(r.text, 'html.parser')
    if flag == 0:
        return [item.text for item in bs.select(selector=selector)]
    elif flag == 1:
        # .get() tolerates anchors without an href (attrs['href'] would KeyError).
        return [item.get('href') for item in bs.select(selector=selector)]
    # The original silently returned None here, which hides caller bugs.
    raise ValueError(f"unsupported flag: {flag!r} (expected 0 or 1)")

'''
自己写一个数据库链接操作的类
'''

class DBTool:
    """Minimal pymysql wrapper for inserting rows into journal_articles."""

    def __init__(self):
        # utf8mb4, not the legacy 3-byte 'utf8' alias: scraped titles and
        # abstracts may contain 4-byte UTF-8 characters (emoji, rare CJK)
        # which MySQL's 'utf8' cannot store.
        # NOTE(review): credentials are hard-coded — move to config/env.
        self.conn = pymysql.connect(host='localhost', database="a",
                                    user='root3', password='88888888',
                                    charset='utf8mb4')
        self.cursor = self.conn.cursor()

    def insert(self, title, author, abstract, pdfurl, pubdate):
        """Insert one article row.

        Uses a parameterized query so scraped text cannot inject SQL.
        Commits on success; rolls back and returns False on any error.

        Returns:
            True if the row was committed, False otherwise.
        """
        try:
            self.cursor.execute('''  
                INSERT INTO journal_articles (title, author, abstract, pdfurl, pubdate)  
                VALUES (%s, %s, %s, %s, %s)  
            ''', (title, author, abstract, pdfurl, pubdate))
            self.conn.commit()
            return True
        except pymysql.MySQLError as e:
            print(f"Database error: {e}")
            self.conn.rollback()
            return False
        except Exception as e:
            print(f"An unexpected error occurred: {e}")
            self.conn.rollback()
            return False

    def close_connection(self):
        """Release the cursor and the connection.

        The connection is closed even if closing the cursor raises,
        so a cursor error cannot leak the underlying connection.
        """
        try:
            self.cursor.close()
        finally:
            self.conn.close()


# Script entry point: scrape one article page and store it in the database.
if __name__ == '__main__':
    url = "https://biotech.aiijournal.com/CN/10.13560/j.cnki.biotech.bull.1985.2024-0234"

    def _first_or(items, default):
        """Return the first scraped item, or *default* when nothing matched."""
        return items[0] if items else default

    # Each selector is specific to this journal's page layout; scrawler
    # returns a list of matches, of which we keep the first (or a default).
    title = _first_or(
        scrawler(url, "#goTop > div.container.whitebg > div.abs-con > div > div > h3:nth-child(4)", flag=0),
        '未知标题')

    authors = scrawler(url, "#goTop > div.container.whitebg > div.abs-con > div > div > p:nth-child(5)", flag=0)
    # Authors are joined into a single comma-separated string for storage.
    author = ', '.join(authors) if authors else '未知作者'

    abstract = _first_or(scrawler(url, "#p00005", flag=0), '无摘要')

    pdfurl = _first_or(
        scrawler(url, "#goTop > div.container.whitebg > div.abs-con > div > div > div.group.clearfix > div > div:nth-child(2) > span > a > h2", flag=0),
        None)

    pubdate = _first_or(scrawler(url, "#divPanel > ul > li:nth-child(1) > span:nth-child(4)", flag=0), '未知日期')

    # Open the DB connection only after scraping succeeds, and guarantee
    # it is closed even if the insert raises.
    db = DBTool()
    try:
        if db.insert(title, author, abstract, pdfurl, pubdate):
            print("插入成功")
        else:
            print("插入失败")
    finally:
        db.close_connection()