import warnings
# NOTE(review): blanket suppression hides *all* library warnings (including
# deprecations) — consider narrowing to specific categories. TODO confirm intent.
warnings.filterwarnings("ignore")

import re
import argparse
from pathlib import Path
import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import seaborn as sns

try:
    from snownlp import SnowNLP
    SNOWNLP_AVAILABLE = True
except Exception:
    SNOWNLP_AVAILABLE = False


def read_stopwords(path: Path) -> set:
    """Load a stopword set from *path* (one word per line, UTF-8).

    Returns an empty set when the file does not exist. Blank lines and
    surrounding whitespace are discarded.
    """
    if not path.exists():
        return set()
    lines = path.read_text(encoding='utf-8', errors='ignore').splitlines()
    stripped = (line.strip() for line in lines)
    return {word for word in stripped if word}


def clean_text(text: str) -> str:
    """Strip URLs, #topic# hashtags and @mentions, then collapse whitespace.

    Non-string input (e.g. NaN from pandas) yields an empty string.
    """
    if not isinstance(text, str):
        return ""
    # Replace each noise pattern with a space so adjacent words don't merge.
    for pattern in (r"https?://\S+", r"#.*?#", r"@\S+"):
        text = re.sub(pattern, " ", text)
    return re.sub(r"\s+", " ", text).strip()


def segment(texts, stopwords: set):
    """Tokenise each text with jieba; drop stopwords and single-char tokens.

    Returns a list of space-joined token strings, one entry per input text.
    """
    segmented = []
    for text in texts:
        kept = []
        for token in jieba.cut(text):
            token = token.strip()
            # Keep only multi-character tokens that are not stopwords.
            if token and len(token) > 1 and token not in stopwords:
                kept.append(token)
        segmented.append(" ".join(kept))
    return segmented


def extract_keywords_tfidf(corpus_tokens, topk=30):
    """Rank terms by TF-IDF score summed over the corpus.

    Parameters
    ----------
    corpus_tokens : list[str]
        Pre-tokenised documents (tokens separated by whitespace).
    topk : int
        Number of top-scoring terms to return.

    Returns
    -------
    pandas.DataFrame
        Columns 'term' and 'score', sorted by score descending; empty
        (same columns) when the corpus yields no vocabulary at all.
    """
    vec = TfidfVectorizer(token_pattern=r"\S+", lowercase=False)
    try:
        X = vec.fit_transform(corpus_tokens)
    except ValueError:
        # sklearn raises ValueError("empty vocabulary ...") when every
        # document is empty (e.g. all tokens were stopwords) — return an
        # empty result instead of crashing the pipeline.
        return pd.DataFrame(columns=["term", "score"])
    scores = X.sum(axis=0).A1  # summed TF-IDF weight per vocabulary term
    terms = vec.get_feature_names_out()
    df = pd.DataFrame({"term": terms, "score": scores}).sort_values("score", ascending=False)
    return df.head(topk)


def sentiment_score(text: str) -> float:
    """Score the sentiment of *text* on [0, 1] (0 = negative, 1 = positive).

    Uses SnowNLP when available; otherwise — or when SnowNLP fails on a
    particular text — falls back to a tiny demo keyword lexicon with
    Laplace smoothing. Empty/falsy input scores a neutral 0.5.
    """
    if not text:
        return 0.5
    if SNOWNLP_AVAILABLE:
        try:
            return SnowNLP(text).sentiments  # already in [0, 1]
        except Exception:
            # Fix: previously returned a flat 0.5 here, ignoring the
            # lexicon fallback below that exists for exactly this case.
            pass
    # Fallback: minimal keyword lexicon (demonstration only).
    pos = ["好", "赞", "开心", "满意", "喜欢", "支持"]
    neg = ["差", "烦", "失望", "讨厌", "生气", "反对"]
    p = sum(w in text for w in pos)
    n = sum(w in text for w in neg)
    if p + n == 0:
        return 0.5
    # Laplace-smoothed positive ratio.
    return (p + 1) / (p + n + 2)


def plot_hist(sentiments, out_path: Path):
    """Save a histogram (with KDE overlay) of sentiment scores to *out_path*."""
    fig, ax = plt.subplots(figsize=(6, 4))
    sns.histplot(sentiments, bins=20, kde=True, color='steelblue', ax=ax)
    ax.set_title('情感分布直方图 (0=负面, 1=正面)')
    ax.set_xlabel('情感分值')
    ax.set_ylabel('样本数')
    fig.tight_layout()
    fig.savefig(out_path, dpi=150)
    plt.close(fig)


def plot_over_time(df: pd.DataFrame, out_path: Path):
    """Save a line chart of daily mean sentiment to *out_path*.

    Silently returns when the 'created_at' column is missing or contains
    no parseable dates. Expects a 'sentiment' column of floats.
    """
    if 'created_at' not in df.columns:
        return
    frame = df.copy()
    # Coerce unparseable timestamps to NaT, then discard them.
    frame['created_at'] = pd.to_datetime(frame['created_at'], errors='coerce')
    frame = frame.dropna(subset=['created_at']).sort_values('created_at')
    if frame.empty:
        return
    # Daily mean sentiment series.
    daily = frame.set_index('created_at')['sentiment'].resample('D').mean().dropna()
    if daily.empty:
        return
    fig, ax = plt.subplots(figsize=(8, 4))
    daily.plot(ax=ax, color='tomato')
    ax.set_title('情感时间趋势（按日平均）')
    ax.set_xlabel('日期')
    ax.set_ylabel('平均情感分值')
    fig.tight_layout()
    fig.savefig(out_path, dpi=150)
    plt.close(fig)


def main():
    """Run the full pipeline: load CSV, clean & segment text, extract
    TF-IDF keywords, score sentiment, and optionally render plots.

    Reads --input relative to this script's directory; writes all
    artifacts into an 'outputs' subdirectory. Raises ValueError when the
    CSV lacks a 'text' column.
    """
    ap = argparse.ArgumentParser(description='微博评论公众态度分析 Pipeline')
    ap.add_argument('--input', type=str, default='weibo_crawl.csv', help='输入CSV（含text列）')
    ap.add_argument('--topk', type=int, default=30, help='关键词TopK')
    ap.add_argument('--plot', action='store_true', help='是否输出图表')
    opts = ap.parse_args()

    root = Path(__file__).resolve().parent
    out_dir = root / 'outputs'
    out_dir.mkdir(parents=True, exist_ok=True)

    print('读取数据...')
    data = pd.read_csv(root / opts.input, encoding='utf-8')
    if 'text' not in data.columns:
        raise ValueError('CSV 必须包含 text 列')

    stopwords = read_stopwords(root / 'stopwords.txt')
    print('清洗文本与分词...')
    data['clean'] = data['text'].map(clean_text)
    data['tokens'] = segment(data['clean'].tolist(), stopwords)

    print('提取关键词(TF-IDF)...')
    keywords = extract_keywords_tfidf(data['tokens'].tolist(), topk=opts.topk)
    keywords.to_csv(out_dir / 'keywords_topk.csv', index=False, encoding='utf-8')

    print('计算情感分值...')
    data['sentiment'] = data['clean'].map(sentiment_score)
    data[['sentiment']].to_csv(out_dir / 'sentiment_summary.csv', index=False, encoding='utf-8')

    if opts.plot:
        print('绘制情感倾向图谱...')
        plot_hist(data['sentiment'], out_dir / 'sentiment_hist.png')
        plot_over_time(data, out_dir / 'sentiment_over_time.png')

    print('完成。输出位于: ', out_dir)


# Run the pipeline only when executed as a script, not when imported.
if __name__ == '__main__':
    main()


