"""
基于简易示例数据集的中文评论情感分析服务。

使用 jieba 进行中文分词，TfidfVectorizer 进行文本向量化，
MultinomialNB 进行朴素贝叶斯分类。

注意：当前实现使用内置的小规模训练数据，用于演示与原型验证，
生产环境应替换为更大规模、标注完善的数据集，并处理模型持久化。
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Dict, List, Tuple

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB


@dataclass
class SentimentResult:
    """Outcome of a single sentiment prediction over one comment."""

    # Human-readable predicted label; one of the values of _LABEL_MAP ("好"/"坏").
    label: str
    # Probability of the predicted class, i.e. max(probabilities.values()).
    confidence: float
    # Label name -> class probability for both classes; values sum to ~1.0.
    probabilities: Dict[str, float]


# Tiny built-in demo corpus; label 1 = positive, 0 = negative.
# As the module docstring notes, this is for prototyping only and should be
# replaced by a real labeled dataset in production.
_TRAINING_DATA: List[Dict[str, object]] = [
    {"text": "这个讨论很有价值，收获很多新知识", "label": 1},
    {"text": "讲解清晰透彻，期待更多内容", "label": 1},
    {"text": "界面友好，搜索也很方便", "label": 1},
    {"text": "帮助我理解了信息工程的基础概念", "label": 1},
    {"text": "帖子内容严谨，点赞支持", "label": 1},
    {"text": "体验太差了，信息过于混乱", "label": 0},
    {"text": "内容重复又枯燥，没有帮助", "label": 0},
    {"text": "页面加载很慢，让人失望", "label": 0},
    {"text": "回复质量太低，浪费时间", "label": 0},
    {"text": "没有解决我的问题，差评", "label": 0},
]

# TF-IDF over jieba word segments. token_pattern=None suppresses sklearn's
# "token_pattern will not be used" warning when a custom tokenizer is given.
_VECTORIZER = TfidfVectorizer(tokenizer=jieba.lcut, token_pattern=None)
_MODEL = MultinomialNB()

# NOTE(review): the vectorizer and model are fitted eagerly at import time.
# Acceptable for this 10-sample demo corpus, but a production service should
# train offline and load a persisted model instead.
_X_TRAIN = _VECTORIZER.fit_transform([item["text"] for item in _TRAINING_DATA])
_Y_TRAIN = [int(item["label"]) for item in _TRAINING_DATA]
_MODEL.fit(_X_TRAIN, _Y_TRAIN)

# Numeric class -> display label ("好" = positive, "坏" = negative).
_LABEL_MAP = {1: "好", 0: "坏"}

# Flat keyword blocklist used by detect_sensitive_terms via case-insensitive
# substring matching. Grouped by category for maintainability only; order is
# otherwise not significant.
SENSITIVE_KEYWORDS: List[str] = [
    # Illegal / unlawful activity
    "违法",
    "非法",
    "犯罪",
    "诈骗",
    "走私",
    "贩卖",
    "行贿",
    "受贿",
    "毒品",
    "赌博",
    "洗钱",
    "侵权",
    "造假",
    "伪造",
    "挪用",
    "勒索",
    # Violence / terrorism / extremism
    "恐怖",
    "恐怖主义",
    "极端主义",
    "爆炸",
    "炸弹",
    "枪支",
    "武器",
    "杀人",
    "暴乱",
    "动乱",
    "极端",
    "暴力",
    "威胁",
    "攻击",
    "袭击",
    "绑架",
    "劫持",
    "袭扰",
    "自杀",
    "自残",
    "血腥",
    # Hate speech / insults
    "仇恨",
    "歧视",
    "偏见",
    "辱骂",
    "骂人",
    "脏话",
    "侮辱性",
    "恶毒",
    "民族仇恨",
    "宗教极端",
    "侮辱",
    "人身攻击",
    "羞辱",
    "诽谤",
    "中伤",
    "诋毁",
    # Pornography / vulgarity
    "色情",
    "低俗",
    "黄色",
    "情色",
    "淫秽",
    "裸露",
    "性交易",
    "约炮",
    "不雅",
    "成人内容",
    "卖淫",
    "嫖娼",
    # Spam / junk content
    "垃圾",
    "垃圾内容",
    "spam",
    "广告推广",
    "营销号",
    "刷屏",
    "引流",
    "套路",
    "假活动",
    "骗局",
    "销售电话",
    # Politically sensitive
    "政治敏感",
    "颠覆",
    "煽动",
    "分裂",
    "叛国",
    "造反",
    "反政府",
    "反动",
    "推翻",
    "暴恐",
    "恐慌",
    "造谣",
    # Other high-risk terms
    "赌博网站",
    "赌球",
    "黑市",
    "地下钱庄",
    "走私通道",
    "作弊器",
    "黑客",
    "木马",
    "病毒",
    "暗网",
    "网暴",
    "人肉搜索",
    "隐私泄露",
]


def analyze_comment(comment: str) -> SentimentResult:
    """Classify the sentiment of a comment with the pre-trained model.

    Args:
        comment: Raw comment text; jieba segmentation happens inside the
            fitted TF-IDF vectorizer.

    Returns:
        A SentimentResult whose label and probability keys are taken from
        the shared _LABEL_MAP, so the display names live in one place.
    """
    vector = _VECTORIZER.transform([comment])
    proba = _MODEL.predict_proba(vector)[0]
    class_prob = dict(zip(_MODEL.classes_, proba))

    # .get with 0.0 guards against a class being absent from classes_.
    positive_score = float(class_prob.get(1, 0.0))
    negative_score = float(class_prob.get(0, 0.0))
    # Ties deliberately resolve to the positive class (>=).
    predicted_class = 1 if positive_score >= negative_score else 0

    # Key the output through _LABEL_MAP instead of repeating the "好"/"坏"
    # literals, keeping this function consistent with the module-level map.
    probabilities = {
        _LABEL_MAP[1]: positive_score,
        _LABEL_MAP[0]: negative_score,
    }

    return SentimentResult(
        label=_LABEL_MAP[predicted_class],
        confidence=max(positive_score, negative_score),
        probabilities=probabilities,
    )


def detect_sensitive_terms(comment: str) -> List[str]:
    """Return every sensitive keyword found in *comment*.

    Matching is a case-insensitive substring test against
    SENSITIVE_KEYWORDS; hits are returned in the blocklist's original
    order with their original casing.
    """
    haystack = comment.lower()
    matches: List[str] = []
    for term in SENSITIVE_KEYWORDS:
        if term.lower() in haystack:
            matches.append(term)
    return matches


def screen_comment(comment: str) -> Tuple[SentimentResult, List[str]]:
    """Run sentiment analysis and sensitive-term detection in one call.

    Convenience entry point for content moderation: returns the
    SentimentResult paired with the list of matched sensitive keywords.
    """
    return analyze_comment(comment), detect_sensitive_terms(comment)

