import sqlite3
import requests
from bs4 import BeautifulSoup

# Target pages: one qikanchina.com thesis-detail URL per paper to scrape.
urls = [
    "https://www.qikanchina.com/thesis/detail/6913749",
    "https://www.qikanchina.com/thesis/detail/6913750"
]

# Headers for the HTTP GET requests: a browser-like User-Agent,
# presumably so the site does not reject the scraper as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# Scrape each URL, store up to two papers per page into SQLite, then print
# all stored rows. Connect ONCE and close ONCE: the original closed the
# connection inside the per-paper loop, which broke the second insert and
# every subsequent URL with a ProgrammingError.
conn = sqlite3.connect('journal_papers.db')
try:
    cursor = conn.cursor()

    # Create the table once, up front (not once per URL).
    cursor.execute('''
    CREATE TABLE IF NOT EXISTS papers (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        authors TEXT,
        abstract TEXT NOT NULL
    )
    ''')

    for url in urls:
        # timeout so a stalled server cannot hang the script forever
        response = requests.get(url, headers=headers, timeout=30)

        # Skip this URL on a bad status instead of killing the whole run
        # (the original called exit() here).
        if response.status_code != 200:
            print("Failed to retrieve the webpage")
            continue

        # Parse the HTML content.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Each paper is contained in a div with class 'resource-detail clearfix'.
        papers_divs = soup.find_all('div', class_='resource-detail clearfix')

        # Extract info from each div; only the first two records.
        for paper_div in papers_divs[:2]:
            # Guard each .find() chain: if the outer container is missing,
            # the original chained .find() raised AttributeError.
            title_container = paper_div.find('div', class_='article-title')
            title_tag = title_container.find('h1') if title_container else None
            title = title_tag.get_text(strip=True) if title_tag else None

            abstract_container = paper_div.find('div', class_='abstract')
            abstract_tag = (abstract_container.find('span', class_='abstract')
                            if abstract_container else None)
            abstract = abstract_tag.get_text(strip=True) if abstract_tag else None

            author_div = paper_div.find('div', class_='author')
            authors = (', '.join(a.get_text(strip=True) for a in author_div.find_all('a'))
                       if author_div else None)

            # The schema declares title and abstract NOT NULL; inserting None
            # would raise IntegrityError, so skip incomplete records.
            if title is None or abstract is None:
                continue

            cursor.execute(
                "INSERT INTO papers (title, authors, abstract) VALUES (?, ?, ?)",
                (title, authors, abstract),
            )

        # Commit once per URL (the original committed per paper).
        conn.commit()

    # Verify by printing all stored rows once, after every URL is processed.
    cursor.execute("SELECT * FROM papers")
    for row in cursor.fetchall():
        print(row)
finally:
    # Close exactly once, even if a request or parse step raised.
    conn.close()