import requests
from bs4 import BeautifulSoup
from dbutils import DBTool


def scrawler(url, selector, flag):
    """Download *url* and extract content matched by a CSS *selector*.

    Args:
        url: Page URL to fetch.
        selector: CSS selector passed to ``BeautifulSoup.select()``.
        flag: 0 -> return the text of every matched element;
              1 -> return the ``href`` attribute of every matched element.

    Returns:
        list[str] for flag 0 or 1; ``None`` for any other flag value
        (kept for backward compatibility with existing callers).

    Raises:
        requests.HTTPError: if the server responds with an error status.
        requests.Timeout: if the request exceeds the timeout.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}
    # timeout prevents the script from hanging forever on a dead server
    r = requests.get(url, headers=headers, timeout=30)
    # fail loudly instead of silently parsing an error page
    r.raise_for_status()
    # When the server omits a charset, requests falls back to ISO-8859-1,
    # which garbles Chinese pages; prefer the content-sniffed encoding.
    if r.encoding is None or r.encoding.lower() == 'iso-8859-1':
        r.encoding = r.apparent_encoding
    bs = BeautifulSoup(r.text, 'html.parser')
    if flag == 0:
        return [item.text for item in bs.select(selector)]
    elif flag == 1:
        # .get() tolerates anchors without an href instead of raising KeyError
        return [item.get('href', '') for item in bs.select(selector)]


if __name__ == '__main__':
    # Target article page (journal abstract page).
    url = "http://dqkxxb.cnjournals.org/dqkxxb/article/abstract/20240501?st=article_issue"
    db = DBTool()

    # Fetch the page ONCE and reuse the parsed tree for every field,
    # instead of re-downloading the same URL per selector.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}
    r = requests.get(url, headers=headers, timeout=30)
    r.raise_for_status()  # abort early on HTTP errors
    bs = BeautifulSoup(r.text, 'html.parser')

    def _texts(selector):
        # Concatenated, stripped text of all elements matching *selector*.
        return ''.join(item.text for item in bs.select(selector)).strip()

    # Scrape the title.
    title = _texts("div.p2 > div.zh > div.title")

    # Scrape the authors: the #cp-cont block holds comma-separated names.
    # Guard against a missing block so an empty page doesn't IndexError.
    author = ''
    author_nodes = bs.select("#cp-cont")
    if author_nodes:
        # Drop CRLF noise, then keep at most the first seven
        # comma-separated fields (the name portion of the citation line).
        cleaned_author = author_nodes[0].text.replace('\r\n', '').strip()
        author = ','.join(cleaned_author.split(',')[0:7]).strip()

    # Scrape the abstract.
    abstract = _texts("#CnAbstractValue")

    # Scrape the publication date.
    publishdate = _texts("#PublishTimeValue")

    # Scrape the PDF download link (href of the #PdfUrl anchor);
    # .get() tolerates anchors without an href.
    downloadurl = ''.join(
        item.get('href', '') for item in bs.select("#PdfUrl")).strip()

    print(title)
    print(author)
    print(abstract)
    print(publishdate)
    print(downloadurl)

    # Persist the record; on success dump the table as a quick sanity check.
    if db.insert(title, author, abstract, publishdate, downloadurl):
        print(db.queryAll())

