import requests
from bs4 import BeautifulSoup
from sql1 import sql as s
import datetime


def scrawler(url, selector, flag):
    """Fetch *url* and extract content matched by a CSS *selector*.

    Parameters
    ----------
    url : str
        Page to download.
    selector : str
        CSS selector passed to ``BeautifulSoup.select()``.
    flag : int
        0 -> return the stripped text of each match;
        1 -> return the ``href`` attribute of each match.

    Returns
    -------
    list[str]
        Extracted strings; ``[]`` on request failure or an unknown flag.
    """
    headers = {
        # Pretend to be a desktop browser; some sites reject the default UA.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'
    }
    try:
        # timeout prevents the script from hanging forever on a dead host
        r = requests.get(url, headers=headers, timeout=10)
        # surface HTTP errors (404/500) instead of silently parsing an error page;
        # HTTPError is a RequestException subclass, so the except below catches it
        r.raise_for_status()
        r.encoding = 'utf-8'
        bs = BeautifulSoup(r.text, 'html.parser')
        if flag == 0:
            return [item.text.strip() for item in bs.select(selector=selector)]
        if flag == 1:
            # .get() avoids a KeyError for anchors without an href attribute
            return [item.get('href', '') for item in bs.select(selector=selector)]
        # unknown flag: explicit empty list instead of an implicit None
        return []
    except requests.exceptions.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return []
# Index page of one journal issue; collect the per-article links from it.
url = "https://xuebao.tust.edu.cn/xblw/201601/index.html"
articles_urls = scrawler(url, "tbody tr a", flag=1)
print(articles_urls)
# NOTE(review): the first 5 and last 4 anchors are presumably navigation
# links, not articles — confirm against the live page layout.
articles_urls = articles_urls[5:-4]
print(articles_urls)

authors, titles, abstracts, pdfurl = [], [], [], []
for relative_url in articles_urls:
    # Article links on the index page are relative to the issue directory.
    page_url = "https://xuebao.tust.edu.cn/xblw/201601/" + relative_url
    author = scrawler(page_url, "center span", flag=0)
    title = scrawler(page_url, "h2 span", flag=0)
    abstract = scrawler(page_url, ".wenzi p", flag=0)
    pdf_links = scrawler(page_url, "li a", flag=1)
    # Normalise every field so downstream [0]-indexing can never raise:
    # abstracts holds plain strings, the other three hold non-empty lists.
    abstracts.append(abstract[0].strip() if abstract else '')
    titles.append(title if title else [''])
    authors.append(author if author else [''])
    pdfurl.append(pdf_links if pdf_links else [''])
# Persist every scraped article. Create the DB helper and the timestamp
# once for the whole batch instead of once per row.
db = s.DBTool()
today = datetime.datetime.now()
for abstract, links, title, author in zip(abstracts, pdfurl, titles, authors):
    # Guard the [0] lookups: any of these lists may be empty if the
    # corresponding CSS selector matched nothing on the article page.
    db.insert(abstract,
              links[0] if links else '',
              title[0] if title else '',
              author[0] if author else '',
              today)

# Print article #1 as a sanity check (needs at least two scraped articles).
if len(abstracts) > 1 and len(pdfurl) > 1:
    # [4:] skips a leading label in the abstract text — presumably a
    # fixed 4-character prefix such as "摘要："; verify against real pages.
    print(f"摘要: {abstracts[1][4:]}")
    print(f"PDF链接: {pdfurl[1][0]}")
    print(f"文章标题：{titles[1][0]}")
    print(f"作者：{authors[1][0]}")
else:
    print("没有足够的文章或内容供显示。")

