# -*- coding: utf-8 -*-
# time: 2025/10/25 9:36
# file: spider.py
# author: kai
# email: 1071664616@qq.com

"""
抓取新华网健康资讯
"""
import re
import hashlib
import json
from datetime import datetime
from urllib.parse import urljoin

import requests
import picologging as logging

from apps.health.models import HealthNews, SensitiveWord

logger = logging.getLogger("health.service")

def news_crawler():
    """
    Crawl health-news list APIs from Xinhuanet and persist the entries in bulk.

    For each list endpoint: fetch the JSON payload, normalise every item's
    title / URL / publish time / cover image, derive a dedup hash, and hand
    the collected batch to ``batch_save_health_news``.

    :return: None
    """
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        'Cookie': 'uid=d4da91c9305c420eb2f10bf338c37ce0; wdcid=4d68cf5a7e5e5106; wdlast=1761284883',
    }
    # Each endpoint carries the Referer it expects, removing the old if/elif chain.
    apis = [
        {"name": "index",
         "url": "https://www.xinhuanet.com/health/ds_4c7c245375274bc49c8528bd3d2a0ce1.json",
         "referer": "https://www.xinhuanet.com/health/index.html"},
        {"name": "djk",
         "url": "https://www.xinhuanet.com/health/djk/ds_54aca4d3e5874a468bc0ff838210990a.json",
         "referer": "https://www.xinhuanet.com/health/djk/index.html"},
        {"name": "kph",
         "url": "https://www.xinhuanet.com/health/kph/ds_38d2f65657564e4c9eb78a80ff10b5a7.json",
         "referer": "https://www.xinhuanet.com/health/kph/index.html"},
    ]
    for api in apis:
        headers['Referer'] = api["referer"]
        try:
            # Timeout so one hung endpoint cannot stall the whole crawl;
            # raise_for_status surfaces HTTP errors instead of parsing an error page.
            response = requests.get(api["url"], headers=headers, timeout=15)
            response.raise_for_status()
            data = response.json()
        except (requests.RequestException, ValueError) as e:
            logger.warning(f"请求 {api['url']} 失败: {e}")
            continue
        datasource = data.get("datasource") or []
        # 收集资讯数据用于批量插入
        news_list = []
        for item in datasource:
            # Titles may arrive wrapped in <a> tags; keep only the inner text.
            title = re.sub(r'<a[^>]*>(.*?)</a>', r'\1', item.get("showTitle", ""))
            url = item.get("publishUrl", "")
            if not url.startswith("http"):
                url = urljoin("https://www.xinhuanet.com/", url)
            try:
                publish_time = datetime.strptime(
                    item.get("publishTime", ""), "%Y-%m-%d %H:%M:%S"
                )
            except ValueError:
                # Skip entries with a missing/malformed timestamp instead of
                # aborting the whole batch.
                logger.warning(f"发布时间缺失或格式错误, 跳过: {title}")
                continue
            images = item.get("titleImages", [])
            title_image = images[0].get("imageUrl", "") if images else ""
            if title_image.startswith(".."):
                # Relative "../xxx" image paths are rooted at the /health section.
                title_image = title_image.replace("..", "https://www.xinhuanet.com/health")
            elif not title_image.startswith("http"):
                title_image = urljoin("https://www.xinhuanet.com/health", title_image)
            # BUGFIX: datetime is not JSON-serialisable, so the old
            # json.dumps(...) raised TypeError on every item. Hash the ISO
            # rendering instead; sort_keys keeps the digest order-independent.
            hash_string = json.dumps(
                {"title": title, "url": url, "publish_time": publish_time.isoformat()},
                sort_keys=True,
                ensure_ascii=False,
            )
            hash_value = hashlib.blake2b(
                hash_string.encode("utf-8"), digest_size=32
            ).hexdigest()
            website = "新华网"
            # 收集数据而不是立即保存
            news_list.append({
                'title': title,
                'url': url,
                'publish_time': publish_time,
                'image_url': title_image,
                'hash_value': hash_value,
                'website': website
            })
        # 批量保存所有资讯数据
        batch_save_health_news(news_list)


def words_crawler():
    """
    Fetch remote sensitive-word vocabularies and bulk-insert them by category.

    Each URL points at a plain-text file with one word per line; every list
    that downloads successfully is stored via ``batch_insert_sensitive_words``.

    :return: None
    """
    remote_urls = {
        "COVID-19": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/COVID-19词库.txt",
        "GFW": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/GFW补充词库.txt",
        "其他": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/其他词库.txt",
        "反动": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/反动词库.txt",
        "广告": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/广告类型.txt",
        "政治": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/政治类型.txt",
        "暴恐": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/暴恐词库.txt",
        "民生": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/民生词库.txt",
        "涉枪涉爆": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/涉枪涉爆.txt",
        "色情类型": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/色情类型.txt",
        "色情": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/色情词库.txt",
        "补充": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/补充词库.txt",
        "贪腐": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/贪腐词库.txt",
        "零时-Tencent": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/零时-Tencent.txt",
        "非法网址": "https://gitproxy.click/https://github.com/konsheng/Sensitive-lexicon/blob/main/Vocabulary/非法网址.txt",
    }
    for category, url in remote_urls.items():
        try:
            # Timeout so one dead mirror cannot hang the remaining downloads;
            # a connection error skips this list instead of aborting them all.
            response = requests.get(url, timeout=15)
        except requests.RequestException as e:
            logger.warning(f"请求 {url} 失败: {e}")
            continue
        if response.status_code == 200:
            words_list = response.text.split("\n")
            words_list = [{"word": word, "category": category} for word in words_list]
            batch_insert_sensitive_words(words_list)
        else:
            # Log through the module logger (was print) so failures reach the log.
            logger.warning(f"请求 {url} 失败")


def batch_save_health_news(news_list):
    """
    Persist a batch of crawled news dicts as HealthNews rows.

    Duplicates (by the model's unique constraints) are silently skipped via
    ``ignore_conflicts``; any database error is logged rather than raised so
    the crawl loop keeps running.

    :param news_list: dicts with keys title/url/publish_time/image_url/
        hash_value/website, as produced by ``news_crawler``.
    :return: None
    """
    try:
        # Build all model instances up front, then insert in chunks of 100.
        created_objects = HealthNews.objects.bulk_create(
            [
                HealthNews(
                    title=entry['title'],
                    url=entry['url'],
                    publish_time=entry['publish_time'],
                    image_url=entry['image_url'],
                    hash_value=entry['hash_value'],
                    website=entry['website'],
                )
                for entry in news_list
            ],
            ignore_conflicts=True,
            batch_size=100,
        )
        logger.info(f"成功批量保存 {len(created_objects)} 条新资讯")
    except Exception as e:
        logger.warning(f"批量保存资讯失败: {e}")

def batch_insert_sensitive_words(words_list):
    """
    Persist a batch of sensitive words, skipping blanks and duplicates.

    Blank/whitespace-only words are filtered out before insertion; existing
    rows are ignored via ``ignore_conflicts``. Database errors are logged
    instead of raised.

    :param words_list: dicts with keys ``word`` and ``category``.
    :return: None
    """
    try:
        # Keep only non-empty words, trimmed of surrounding whitespace.
        sensitive_words = [
            SensitiveWord(word=item['word'].strip(), category=item['category'])
            for item in words_list
            if item['word'].strip()
        ]
        SensitiveWord.objects.bulk_create(
            sensitive_words,
            ignore_conflicts=True,
        )
        logger.info(f"成功插入 {len(sensitive_words)} 个敏感词")
    except Exception as e:
        logger.warning(f"批量插入敏感词失败: {e}")


def crawl_worker():
    """Entry point for the crawl job: run the enabled crawlers in sequence."""
    news_crawler()
    # words_crawler()  # sensitive-word sync is currently disabled


# Allow running the spider directly as a standalone script.
if __name__ == "__main__":
    crawl_worker()
