# 爬取控制与决策期刊论文

import requests
from bs4 import BeautifulSoup
from dbutils import DBTool
# URL of one journal issue: Control and Decision (控制与决策), 2024, Vol. 39, No. 10
url = "http://kzyjc.alljournals.cn/kzyjc/article/issue/2024_39_10"
def paperScr(url, selector, flag):
    """Fetch *url* and extract the elements matched by the CSS *selector*.

    Parameters:
        url: page URL to fetch.
        selector: CSS selector passed to BeautifulSoup.select().
        flag: 0 -> return the text content of each matched element;
              1 -> return each matched element's 'href' attribute.

    Returns:
        A list of strings (possibly empty when nothing matches).

    Raises:
        requests.HTTPError: on a non-2xx response (fail fast instead of
            silently parsing an error page).
        ValueError: if *flag* is neither 0 nor 1 (the original code
            silently returned None in that case).
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.3.5211 SLBChan/105'
    }
    r = requests.get(url, headers=headers)
    r.raise_for_status()
    # NOTE(review): r.text decodes with the declared/guessed charset; for this
    # Chinese-language site confirm r.encoding is detected correctly — TODO.
    bs = BeautifulSoup(r.text, "html.parser")
    if flag == 0:
        return [item.text for item in bs.select(selector=selector)]
    elif flag == 1:
        return [item.attrs['href'] for item in bs.select(selector=selector)]
    raise ValueError(f"unsupported flag: {flag!r} (expected 0 or 1)")
# CSS selector for paper titles on the issue page (title text lives in an <a>).
title_selector = "div.article_title  a"
# CSS selector for paper authors on the issue page.
author_selector = "p.article_author  span  a"

# Scrape the issue page once per field: titles as text, authors as text,
# and the same title anchors again for their article-page links.
paperTitle = paperScr(url, title_selector, 0)
print(paperTitle)
paperAuthor = paperScr(url, author_selector, 0)
print(paperAuthor)
paperUrl = paperScr(url, title_selector, 1)
print(paperUrl)
# Per-paper details scraped from each article page.
abstracts, pdfurl, pubDate = [], [], []
# Use a distinct loop name: the original reused `url`, clobbering the
# module-level issue URL.
for relative_url in paperUrl:
    full_url = "http://kzyjc.alljournals.cn/" + relative_url
    # Each selector matches (at most) a single element, so keep the first
    # hit. The original appended the whole one-element list, so lists — not
    # strings — ended up in these collections and later in the database row.
    # Abstract
    abstract_hits = paperScr(full_url, "#CnAbstractValue", 0)
    abstracts.append(abstract_hits[0] if abstract_hits else "")
    # PDF link
    pdf_hits = paperScr(full_url, "#PdfUrl", 1)
    pdfurl.append(pdf_hits[0] if pdf_hits else "")
    # Publication date (second breadcrumb link on the article page)
    date_hits = paperScr(full_url, "#all_issue_position > a:nth-child(2)", 0)
    pubDate.append(date_hits[0] if date_hits else "")

print(abstracts)
print(pdfurl)
print(pubDate)
dbTool = DBTool()
# NOTE(review): paperAuthor is a flat list of every author link on the issue
# page, while paperTitle has one entry per paper — as soon as any paper has
# more than one author these lists fall out of alignment, and zip() pairs
# authors with the wrong titles. TODO: scrape authors per article page (or
# group them per paper) before zipping.
for abstract, author_name, pdf, title in zip(abstracts, paperAuthor, pdfurl, paperTitle):
    # paperScr returns lists; tolerate both list-valued and string-valued
    # entries so the database receives plain strings either way.
    abstract_text = abstract if isinstance(abstract, str) else "".join(abstract)
    pdf_link = pdf if isinstance(pdf, str) else "".join(pdf)
    if dbTool.insert(abstract_text, author_name, pdf_link, title):
        print(f"论文 '{title}' 已成功插入数据库。")
    else:
        print(f"论文 '{title}' 插入数据库失败。")

# Query and print all inserted rows.
all_journals = dbTool.queryAll()
for journal in all_journals:
    print(journal)


