import pymysql
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import time


def scrawler(url, selector, flag, retries=3):
    """Fetch *url* and extract content matched by a CSS selector.

    Parameters
    ----------
    url : str
        Page to download.
    selector : str
        CSS selector passed to ``BeautifulSoup.select()``.
    flag : int
        0 -> return the stripped text of each match;
        1 -> return the ``href`` attribute of each match.
    retries : int, optional
        Number of download attempts before giving up (default 3).

    Returns
    -------
    list[str]
        Extracted strings; empty list on failure or unknown *flag*.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}

    if flag not in (0, 1):
        # An unknown flag can never yield a result; bail out before wasting
        # network requests (the original silently retried every time).
        return []

    for _ in range(retries):
        try:
            r = requests.get(url, headers=headers, timeout=10)  # 10-second timeout
            r.raise_for_status()  # raise on HTTP error status
            bs = BeautifulSoup(r.text, 'html.parser')
            if flag == 0:
                return [item.text.strip() for item in bs.select(selector=selector)]
            # flag == 1: use .get() and filter Nones so a matched tag without
            # an href cannot raise an uncaught KeyError (attrs['href'] would
            # crash the whole crawl — KeyError is not a RequestException).
            return [item.attrs.get('href')
                    for item in bs.select(selector=selector)
                    if item.attrs.get('href') is not None]
        except requests.exceptions.RequestException as e:
            print(f"请求失败: {e}")
            time.sleep(2)  # wait 2 seconds before retrying
    return []  # all retries failed


def get_all_issue_urls(base_url, start_issue, end_issue):
    """Build issue-page URLs counting down from start_issue to end_issue.

    *base_url* must contain one ``{}`` placeholder for the issue number.
    Returns the URLs in descending issue order (both endpoints inclusive);
    an empty list when start_issue < end_issue.
    """
    return [base_url.format(num) for num in range(start_issue, end_issue - 1, -1)]


def clean_date(date_str):
    """Normalize a raw publish-date string to a ``datetime.date``.

    Strips a leading "发布日期:" label (e.g. "发布日期:2024-10-18"), then
    parses the remainder as YYYY-MM-DD. Returns None when the string does
    not parse as a valid date.
    """
    prefix = "发布日期:"
    if date_str.startswith(prefix):
        date_str = date_str.replace(prefix, "").strip()

    try:
        parsed = datetime.strptime(date_str, "%Y-%m-%d")
    except ValueError:
        # Unparseable input — signal failure to the caller.
        return None
    return parsed.date()


def clean_keywords(keywords_str):
    """Remove the "关键词:" label from a raw keywords string and trim it."""
    without_label = keywords_str.replace("关键词:", "")
    return without_label.strip()


def clean_abstract(abstract_str):
    """Remove the "摘要：" label (full-width colon) from a raw abstract and trim it."""
    without_label = abstract_str.replace("摘要：", "")
    return without_label.strip()


def clean_author(author_str):
    """Strip digits and asterisks (affiliation/footnote markers) from an author string.

    All other characters are kept unchanged; the result is whitespace-trimmed.
    """
    without_markers = re.sub(r'[\d*]', '', author_str)
    return without_markers.strip()


class DBTool():
    """Thin wrapper around a pymysql connection to the paper_management database."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider moving them to
        # environment variables or a config file.
        self.conn = pymysql.connect(host='localhost', database="paper_management"
                                    , user='root', password='123456'
                                    , charset='utf8')
        self.cursor = self.conn.cursor()

    def queryAll(self):
        """Return every row of the paper table as a tuple of tuples."""
        self.cursor.execute('select * from paper')
        return self.cursor.fetchall()

    def queryOne(self, title):
        """Return the first paper row whose title equals *title*, or None."""
        # Parameters go in a tuple — the canonical DB-API form (the original
        # passed a bare string, which pymysql tolerates but is non-standard).
        self.cursor.execute('select * from paper where title=%s', (title,))
        return self.cursor.fetchone()

    def insert(self, title, author, keywords, abstract_text, pubdate):
        """Insert one paper row; return True on success, False on failure."""
        try:
            self.cursor.execute(
                'insert into paper (title, author, keywords, abstract_text, pubdate) values (%s, %s, %s, %s, %s)',
                (title, author, keywords, abstract_text, pubdate))
            self.conn.commit()
            return True
        except Exception as e:
            # Roll back the failed transaction so the connection remains
            # usable for subsequent inserts (the original skipped this).
            self.conn.rollback()
            print(e)
            return False

    def close(self):
        """Release the cursor and connection (new, backward-compatible helper)."""
        self.cursor.close()
        self.conn.close()


if __name__ == '__main__':

    # Each record is (title, author, keywords, abstract_text, raw_pubdate).
    # A single list of tuples replaces the original five parallel lists,
    # which could silently drift out of sync and forced index-based iteration.
    records = []

    base_url = "http://gxbwk.njournal.sdu.edu.cn/CN/volumn/volumn_{}.shtml"
    start_issue = 118  # crawl starts at issue 118
    end_issue = 93  # and counts down to issue 93
    issue_urls = get_all_issue_urls(base_url, start_issue, end_issue)

    for url in issue_urls:
        print(f"正在爬取期刊页面：{url}")
        article_urls = scrawler(url, "a.biaoti", flag=1)

        if not article_urls:
            print(f"未能爬取到任何文章链接，跳过：{url}")
            continue  # nothing to scrape on this issue page

        for article_url in article_urls:
            print(f"正在爬取论文页面：{article_url}")

            # Scrape title, authors, keywords, abstract and publish date.
            raw_title = scrawler(article_url,
                                 "#goTop > div.container.whitebg > div.abs-con > div > div > h3:nth-child(4)", flag=0)
            raw_author = scrawler(article_url,
                                  "#goTop > div.container.whitebg > div.abs-con > div > div > p:nth-child(5) > span",
                                  flag=0)
            raw_keywords = scrawler(article_url, "#collapseOne > div > form > p", flag=0)
            raw_abstract = scrawler(article_url, "#collapseOne > div > p:nth-child(1)", flag=0)
            raw_update = scrawler(article_url, "#divPanel > ul > li:nth-child(1) > span:nth-child(5)", flag=0)

            # Clean the scraped fragments and store one complete record.
            records.append((
                ''.join(raw_title),
                clean_author(''.join(raw_author)),
                clean_keywords(''.join(raw_keywords)),
                clean_abstract(''.join(raw_abstract)),
                ''.join(raw_update),
            ))

    dbTool = DBTool()

    try:
        # Insert every scraped record; skip those with an unparseable date.
        for title, author, keywords, abstract_text, raw_date in records:
            pubdate = clean_date(raw_date)

            if pubdate:
                if dbTool.insert(title, author, keywords, abstract_text, pubdate):
                    print(f"论文《{title}》已成功插入数据库。")
                else:
                    print(f"论文《{title}》插入数据库失败。")
            else:
                print(f"日期格式无效：{raw_date}")
    finally:
        # The original never released the connection.
        dbTool.conn.close()
