from datetime import datetime

import requests
import random
import time

from bs4 import BeautifulSoup
from django.db import transaction

from news.core.models.news import NewsItem
from utils.logger import logger


class GXBHCrawler:
    """Crawler for the Guangxi Beihai news site (www.gxbhxww.cn).

    Fetches category list pages, follows article links, parses the
    article payload (title, source, date, body, media) and persists new
    articles as ``NewsItem`` rows.
    """

    BASE_URL = "https://www.gxbhxww.cn"

    # Category display name -> site node id used in list-page URLs.
    category_map = {
        "时政": 179,
        "要闻": 2016,
        "视频北海": 160729,
        "城事": 2017,
        "珠城法治": 160640,
        "广播": 160637,
    }
    # node id -> category display name, for reverse lookups.
    reverse_category_map = {v: k for k, v in category_map.items()}

    def __init__(self):
        # Desktop Chrome UA so the site serves the regular HTML pages.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def _absolutize(self, link):
        """Return *link* as an absolute URL rooted at ``BASE_URL``.

        Fixes the original join bug where ``BASE_URL`` (no trailing
        slash) was concatenated with a path whose leading slash had been
        stripped, yielding e.g. ``https://www.gxbhxww.cncontent/...``.
        Returns *link* unchanged when it is falsy or already absolute.
        """
        if not link or link.startswith("http"):
            return link
        return f"{self.BASE_URL}/{link.lstrip('/')}"

    @staticmethod
    def _select_text(soup, selector):
        """Return stripped text of the first match for *selector*, else None."""
        node = soup.select_one(selector)
        return node.get_text(strip=True) if node else None

    def get_news_list_page(self, page: int = 1, category: str = None, retry: int = 3):
        """Fetch one list page of headlines for *category*.

        Args:
            page: 1-based page number; page 1 carries no ``_N`` URL suffix.
            category: key of ``category_map``. An unknown or missing
                category is logged and yields an empty result instead of
                raising ``KeyError`` as before.
            retry: attempts for transient failures (HTTP 503, network
                errors). Previously any exception aborted immediately,
                making ``retry`` effective only for 503 responses.

        Returns:
            list[dict]: items with ``news_item_title``, ``news_item_link``
            and ``news_item_cate`` keys; an empty list on failure.
        """
        category = category or ""
        node_id = self.category_map.get(category)
        if node_id is None:
            logger.error(f"未知的新闻分类: {category}")
            return []

        page = int(page)
        suffix = f"_{page}" if page > 1 else ""
        url = f"{self.BASE_URL}/node_{node_id}{suffix}.html"

        for attempt in range(retry):
            try:
                logger.info(f"正在获取新闻列表: {url}")

                # timeout added: the original call could hang indefinitely.
                response = requests.get(url, headers=self.headers, timeout=10)
                response.encoding = "utf-8"

                if response.status_code == 503:
                    logger.warning(f"服务暂时不可用(503)，第{attempt + 1}次重试...")
                    time.sleep(5)  # longer pause for a temporarily unavailable server
                    continue

                if response.status_code != 200:
                    logger.error(f"获取新闻列表失败: {response.status_code}")
                    return []

                soup = BeautifulSoup(response.text, "html.parser")
                news_items = []

                # Each headline sits in an <li class="news_li"> element.
                for item in soup.select("li.news_li"):
                    link_tag = item.find("a")
                    if not link_tag:
                        continue

                    title_tag = item.select_one(".news_txt h3")
                    title = title_tag.get_text(strip=True) if title_tag else ""
                    link = self._absolutize(link_tag.get("href"))  # type: ignore

                    if title and link:
                        logger.info(f"获取新闻列表成功: {category}")
                        news_items.append(
                            {
                                "news_item_title": title,
                                "news_item_link": link,
                                "news_item_cate": category,
                            }
                        )

                return news_items

            except Exception as e:
                # Retry on transient errors instead of aborting immediately.
                logger.error(f"爬取新闻列表异常: {str(e)}")
                time.sleep(2)

        return []  # all attempts exhausted

    def get_news_detail(self, url, retry=3):
        """Fetch and parse one article detail page.

        Args:
            url: article detail-page URL.
            retry: attempts for transient failures (HTTP 503, network
                errors). Previously any exception aborted immediately.

        Returns:
            dict: title/subtitle/editor/source/date/content plus image,
            video and audio lists — or None when the page cannot be
            fetched or lacks an article body.
        """
        # Browser-like headers; Referer helps avoid hotlink protection.
        headers = {
            **self.headers,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive",
            "Referer": self.BASE_URL,
        }

        for attempt in range(retry):
            try:
                # Random polite delay to avoid getting blocked.
                time.sleep(random.uniform(0.5, 1.5))

                response = requests.get(url, headers=headers, timeout=10)
                response.encoding = "utf-8"

                if response.status_code == 503:
                    logger.warning(f"服务暂时不可用(503)，第{attempt + 1}次重试...")
                    time.sleep(5)  # longer pause for a temporarily unavailable server
                    continue

                if response.status_code != 200:
                    logger.error(f"获取新闻详情失败: {response.status_code}")
                    return None

                soup = BeautifulSoup(response.text, "html.parser")
                return self._parse_detail(soup)

            except Exception as e:
                # Retry on transient errors instead of aborting immediately.
                logger.error(f"爬取新闻详情异常: {str(e)}")
                time.sleep(2)

        return None  # all attempts exhausted

    def _parse_detail(self, soup):
        """Extract the structured article payload from a parsed page.

        Returns None when the article body (``.cont-article``) is missing.
        """
        content_div = soup.select_one(".cont-article")
        if not content_div:
            return None

        title = self._select_text(soup, ".article-title")
        subtitle = self._select_text(soup, ".article_subtitle")

        source = self._select_text(soup, ".article-source")
        if source:
            source = source.replace("来源：", "")

        publish_time = self._select_text(soup, ".article-time")
        if publish_time:
            publish_time = publish_time.replace("时间：", "")
            try:
                publish_time = datetime.strptime(publish_time, "%Y-%m-%d").date()
            except ValueError:
                publish_time = None  # unexpected date format; store nothing

        images, videos, audios = self._extract_media(content_div)

        # Plain-text body; the raw HTML is kept separately to preserve layout.
        content = content_div.get_text(strip=True)

        # Editor credit, e.g. "……编辑：张三".
        editor = ""
        editor_div = soup.select_one(".cont-article-gaoyuan")
        if editor_div:
            editor_text = editor_div.get_text(strip=True)
            if "编辑：" in editor_text:
                editor = editor_text.split("编辑：")[1].strip()

        return {
            "news_item_title": title,
            "news_item_subtitle": subtitle,
            "news_item_editor": editor,
            "news_item_src": source,
            "news_item_reldate": publish_time,
            "news_item_content_div": str(content_div),  # keep HTML to preserve formatting
            "news_item_content": content,
            "image_list": images,
            "video_list": videos,
            "audio_list": audios,
        }

    def _extract_media(self, content_div):
        """Collect image, video and audio descriptors from the article body.

        Returns a ``(images, videos, audios)`` tuple of dict lists; all
        media URLs are absolutized against ``BASE_URL``.
        """
        images = []
        for img in content_div.find_all("img"):
            img_src = self._absolutize(img.get("src"))
            if not img_src:
                continue
            # A caption follows the image in a "ueditor-text-tushuo" node.
            description = ""
            next_sibling = img.find_next_sibling()
            if next_sibling and "ueditor-text-tushuo" in next_sibling.get("class", []):
                description = next_sibling.get_text(strip=True)
            images.append(
                {
                    "url": img_src,
                    "description": description,
                    "width": img.get("data-width", ""),
                    "height": img.get("data-height", ""),
                }
            )

        videos = []
        for video in content_div.find_all("video"):
            # Some players carry the source in a "url" attribute instead of "src".
            video_src = self._absolutize(video.get("src") or video.get("url"))
            if video_src:
                videos.append(
                    {
                        "url": video_src,
                        "poster": video.get("poster", ""),
                        "width": video.get("width", ""),
                        "height": video.get("height", ""),
                        "description": video.get("data-describe", ""),
                        "duration": video.get("data-duration", ""),
                    }
                )

        audios = []
        for audio in content_div.find_all("audio"):
            audio_src = self._absolutize(audio.get("src"))
            if audio_src:
                audios.append(
                    {
                        "url": audio_src,
                        "description": audio.get("data-describe", ""),
                        "duration": audio.get("data-duration", ""),
                    }
                )

        return images, videos, audios

    @transaction.atomic
    def crawl_and_save(self, page=1, category=None, limit=10):
        """Crawl up to *limit* items from one list page and persist new ones.

        Args:
            page: list page number, forwarded to ``get_news_list_page``.
            category: category name, forwarded to ``get_news_list_page``.
            limit: maximum number of list items to process.

        Returns:
            int: number of ``NewsItem`` rows created. Runs in a single DB
            transaction, so any failure rolls back the whole batch.
        """
        news_list: list = self.get_news_list_page(page=page, category=category)
        count = 0

        for news_item in news_list[:limit]:
            news_item_title = news_item["news_item_title"]
            news_item_link = news_item["news_item_link"]

            # Skip articles we already have (link is the dedup key).
            if NewsItem.objects.filter(news_item_link=news_item_link).exists():
                logger.info(f"新闻已存在: {news_item_title}，链接：{news_item_link}")
                continue

            # Reset per item: previously a stale detail dict from an earlier
            # iteration could be saved for links outside the /content/ area.
            news_detail: dict = {}

            # Only fetch details for links hosted on the news site itself.
            if "www.gxbhxww.cn/content/" in news_item_link:
                logger.info(f"开始爬取链接：{news_item_link}")
                news_detail = self.get_news_detail(news_item_link)
                if not news_detail:
                    logger.warning(f"无法获取新闻详情: {news_item_link}")
                    continue

            # Media lists are stored as comma-separated URL strings.
            image_list = ",".join(
                img["url"] for img in news_detail.get("image_list", [])
            )
            video_list = ",".join(
                video["url"] for video in news_detail.get("video_list", [])
            )
            audio_list = ",".join(
                audio["url"] for audio in news_detail.get("audio_list", [])
            )

            NewsItem.objects.create(
                news_item_title=news_item_title,  # headline
                news_item_link=news_item_link,  # article URL (dedup key)
                news_item_cate=news_item["news_item_cate"],  # category name
                news_item_content=news_detail.get("news_item_content", ""),  # plain text
                news_item_content_div=news_detail.get("news_item_content_div", ""),  # raw HTML
                news_item_editor=news_detail.get("news_item_editor", ""),  # editor credit
                news_item_src=news_detail.get("news_item_src", ""),  # source outlet
                news_item_reldate=news_detail.get("news_item_reldate"),  # publish date
                news_item_subtitle=news_detail.get("news_item_subtitle", ""),  # subtitle
                image_list=image_list or None,  # comma-joined image URLs
                video_list=video_list or None,  # comma-joined video URLs
                audio_list=audio_list or None,  # comma-joined audio URLs
                status=1,  # published
                news_item_origin_id=None,  # no upstream origin id
            )

            count += 1
            logger.info(f"成功保存新闻: {news_item_title}")

        return count
