########################################################################
# Guangzhou NanYang Polytechnic College
#   广州南洋理工职业学院版权所有
# Copyright © Guangzhou NanYang Polytechnic All Rights Reserved
########################################################################
import requests
from bs4 import BeautifulSoup
import re
import mysql.connector
from datetime import datetime
import time
import random
import traceback
from urllib.parse import urljoin

class SchoolNewsCrawler:
    """Crawler for the gznylg.edu.cn news site.

    Fetches news pages by numeric ID, extracts category/title/publish
    time/body with BeautifulSoup, and persists each article into a MySQL
    ``news`` table (deduplicated on ``news_id``).
    """

    # Publish-time formats observed in the site's "发布时间" field, tried in order.
    PUB_TIME_FORMATS = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d')
    # Compiled once instead of on every parse_news() call.
    _PUB_TIME_RE = re.compile(r'发布时间：\[(.*?)\]')

    def __init__(self):
        self.base_url = 'https://www.gznylg.edu.cn'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        }
        # One Session reuses TCP connections (keep-alive) across requests
        # instead of opening a fresh connection for every page.
        self.session = requests.Session()
        self.session.headers.update(self.headers)

        # NOTE(review): credentials are placeholders — load from environment
        # variables or a config file before deploying.
        self.db_config = {
            'host': 'localhost',
            'port': 3306,
            'user': 'your_user_name',
            'password': 'your_password',
            'database': 'your_data_base',
            'auth_plugin': 'mysql_native_password',
            'use_pure': True,
            'charset': 'utf8mb4'
        }
        self.db_conn = None

        self._connect_db()
        self._create_tables()

    def _connect_db(self):
        """Open the MySQL connection; on failure leave self.db_conn as None."""
        try:
            self.db_conn = mysql.connector.connect(**self.db_config)
            print("数据库连接成功")
        except Exception as e:
            print(f"数据库连接失败: {e}")
            traceback.print_exc()
            self.db_conn = None

    def _create_tables(self):
        """Create the ``news`` table if it does not already exist."""
        if not self.db_conn:
            print("无法创建数据表: 数据库未连接")
            return

        cursor = None
        try:
            cursor = self.db_conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS news (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    news_id INT UNIQUE,
                    category VARCHAR(50),
                    title VARCHAR(500) NOT NULL,
                    pub_time DATETIME,
                    content_html LONGTEXT,
                    content_text LONGTEXT,
                    url VARCHAR(255),
                    crawl_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    INDEX (news_id),
                    INDEX (pub_time)
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
            ''')
            self.db_conn.commit()
            print("数据库表创建完成")
        except Exception as e:
            print(f"创建数据表出错: {e}")
            if self.db_conn:
                self.db_conn.rollback()
        finally:
            # Close the cursor even when execute/commit raised (the original
            # leaked it on the error path).
            if cursor is not None:
                try:
                    cursor.close()
                except Exception:
                    pass

    def _parse_pub_time(self, raw):
        """Parse the site's publish-time string into a ``datetime``.

        Tries each format in PUB_TIME_FORMATS; returns None for empty or
        unparseable input instead of raising.
        """
        if not raw:
            return None
        for fmt in self.PUB_TIME_FORMATS:
            try:
                return datetime.strptime(raw, fmt)
            except ValueError:
                continue
        return None

    def parse_news(self, news_id):
        """Fetch and parse one news page.

        Returns a dict with keys news_id/category/title/pub_time/
        content_html/content_text/url, or None when the page is missing,
        empty, or malformed.
        """
        url = f"{self.base_url}/Home/news/newsContent/newsid/{news_id}"

        try:
            response = self.session.get(url, timeout=10)
            if response.status_code != 200:
                print(f"获取新闻页面失败，状态码: {response.status_code}, ID: {news_id}")
                return None

            if not response.text:
                print(f"新闻页面内容为空，跳过: {news_id}")
                return None

            # NOTE(review): if stored pages come back garbled, the server may
            # omit the charset header — consider forcing response.encoding.
            soup = BeautifulSoup(response.text, 'html.parser')

            article_title = soup.select_one('#article_title')
            if not article_title:
                print(f"未找到新闻内容区域，跳过: {news_id}")
                return None

            category_elem = article_title.select_one('.new_title')
            category = category_elem.text.strip() if category_elem else "未分类"

            title_elem = article_title.select_one('h3')
            title = title_elem.text.strip() if title_elem else "无标题"

            publish_elem = article_title.select_one('#publish')
            pub_time = None
            if publish_elem:
                pub_time_match = self._PUB_TIME_RE.search(publish_elem.text)
                pub_time = pub_time_match.group(1) if pub_time_match else None

            content_container = article_title.select_one('#content_article')
            if not content_container:
                print(f"未找到新闻内容元素，跳过: {news_id}")
                return None

            # Rewrite relative image/link URLs to absolute ones so the stored
            # HTML renders correctly outside the original site.
            for tag in content_container.find_all(['img', 'a']):
                if tag.name == 'img' and tag.has_attr('src'):
                    tag['src'] = urljoin(self.base_url, tag['src'])
                elif tag.name == 'a' and tag.has_attr('href'):
                    tag['href'] = urljoin(self.base_url, tag['href'])

            content_html = str(content_container)
            content_text = content_container.get_text(strip=True)

            if not content_text:
                print(f"新闻内容为空，跳过: {news_id}")
                return None

            return {
                'news_id': news_id,
                'category': category,
                'title': title,
                'pub_time': pub_time,
                'content_html': content_html,
                'content_text': content_text,
                'url': url
            }

        except requests.exceptions.RequestException as e:
            print(f"请求失败 ID:{news_id}, 错误: {e}")
            return None
        except Exception as e:
            print(f"解析新闻失败 ID:{news_id}, 错误: {e}")
            traceback.print_exc()
            return None

    def save_to_db(self, news_data):
        """Insert one parsed article; returns True only on a new insert."""
        if not self.db_conn:
            print("数据库未连接，尝试重连")
            self._connect_db()
            if not self.db_conn:
                print("重连失败，无法保存数据")
                return False

        cursor = None
        try:
            cursor = self.db_conn.cursor()
            # Check-then-insert keeps the friendly duplicate message; the
            # UNIQUE index on news_id still guards against races.
            cursor.execute("SELECT id FROM news WHERE news_id = %s", (news_data['news_id'],))
            if cursor.fetchone():
                print(f"新闻已存在，跳过: {news_data['news_id']}")
                return False

            query = """
                INSERT INTO news (
                    news_id, category, title, pub_time, content_html, content_text, url
                ) VALUES (%s, %s, %s, %s, %s, %s, %s)
            """

            # Truncate to the column widths declared in _create_tables.
            values = (
                news_data['news_id'],
                news_data['category'][:50] if news_data['category'] else None,
                news_data['title'][:500] if news_data['title'] else "无标题",
                self._parse_pub_time(news_data['pub_time']),
                news_data['content_html'],
                news_data['content_text'],
                news_data['url'][:255] if news_data['url'] else None
            )

            cursor.execute(query, values)
            self.db_conn.commit()
            print(f"新闻已保存: {news_data['news_id']} - {news_data['title']}")
            return True

        except mysql.connector.Error as err:
            print(f"数据库错误: {err}")
            if self.db_conn:
                self.db_conn.rollback()
            # 2006 = server has gone away, 2013 = lost connection: reconnect
            # so the next save can succeed.
            if err.errno in (2006, 2013):
                print("尝试重新连接数据库...")
                self._connect_db()
            return False
        except Exception as e:
            print(f"保存失败: {e}")
            traceback.print_exc()
            if self.db_conn:
                self.db_conn.rollback()
            return False
        finally:
            # Close the cursor on every path (the original leaked it whenever
            # execute/commit raised).
            if cursor is not None:
                try:
                    cursor.close()
                except Exception:
                    pass

    def crawl_id_range(self, start_id, end_id):
        """Crawl every news ID in [start_id, end_id]; return the saved count."""
        total = end_id - start_id + 1
        valid_count = 0

        print(f"开始爬取 {start_id}-{end_id} 共{total}条")

        for news_id in range(start_id, end_id + 1):
            try:
                print(f"[进度 {news_id - start_id + 1}/{total}] 处理ID: {news_id}")

                news_data = self.parse_news(news_id)
                if news_data and self.save_to_db(news_data):
                    valid_count += 1

                # Random polite delay to avoid hammering the server.
                time.sleep(random.uniform(0.5, 1.5))
            except Exception as e:
                print(f"处理 {news_id} 出错: {e}")
                traceback.print_exc()

        print(f"完成！有效新闻 {valid_count}/{total} 条")
        return valid_count

    def close(self):
        """Release the HTTP session and the database connection."""
        try:
            self.session.close()
        except Exception:
            pass
        if self.db_conn:
            try:
                self.db_conn.close()
                print("数据库已关闭")
            except Exception:
                pass

def main(test_id, start_id, end_id):
    """Run a single-ID smoke test, then crawl the full ID range."""
    crawler = None
    try:
        crawler = SchoolNewsCrawler()

        # Smoke-test one known ID before committing to the whole range.
        test_data = crawler.parse_news(test_id)
        if test_data is None:
            print("测试失败，请检查")
        else:
            print(f"测试成功: {test_data['title']}")
            crawler.save_to_db(test_data)

        crawler.crawl_id_range(start_id=start_id, end_id=end_id)

    except Exception as e:
        print(f"运行异常: {e}")
        traceback.print_exc()
    finally:
        # Always release DB/HTTP resources, even after a failure.
        if crawler is not None:
            crawler.close()

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='广州南洋理工职业学院-新闻网爬虫')
    parser.add_argument('--test_id', type=int, default=96800, help='测试用例ID')
    parser.add_argument('--start_id', type=int, default=96800, help='起始ID')
    parser.add_argument('--end_id', type=int, default=96868, help='结束ID')
    args = parser.parse_args()
    
    main(test_id=args.test_id, start_id=args.start_id, end_id=args.end_id)
