import pandas as pd
import re
import requests
import json
import time
import logging
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from wordcloud import WordCloud
import os
import matplotlib
import hashlib
import sys
import traceback
import concurrent.futures
import threading
import multiprocessing

# ================== Initialisation ==================
# Output directories (idempotent thanks to exist_ok).
os.makedirs("results", exist_ok=True)
os.makedirs("visualizations", exist_ok=True)
os.makedirs("cache", exist_ok=True)

# Use a CJK-capable font so matplotlib can render Chinese labels.
plt.rcParams['font.family'] = 'SimHei'  # SimHei ships with Windows; may be absent on other OSes
matplotlib.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts

# Logging: every run overwrites the previous log file (filemode='w').
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='舆情分析.log',
    filemode='w'
)


# ================== 进度跟踪器 ==================
class ProgressTracker:
    """Thread-safe progress tracker that redraws a console progress bar in place.

    One instance is shared by all worker threads; ``update`` serialises all
    state changes behind a lock and repaints the bar via a ``\r`` rewrite.
    """

    def __init__(self):
        self.total_tasks = 0      # size of the current batch
        self.completed_tasks = 0  # tasks finished so far in this batch
        self.current_task = ""    # label shown in front of the bar
        self.lock = threading.Lock()
        self.start_time = time.time()

    def reset(self, total_tasks):
        """Start a new batch of ``total_tasks`` tasks.

        Bug fix: ``start_time`` is now reset too.  Previously it was only set
        in ``__init__``, so elapsed-time and remaining-time estimates were
        wrong for every batch after the first.  The reset also takes the lock
        so it cannot interleave with a concurrent ``update``.
        """
        with self.lock:
            self.total_tasks = total_tasks
            self.completed_tasks = 0
            self.current_task = ""
            self.start_time = time.time()

    def update(self, task_name=None, increment=1):
        """Advance the counter by ``increment`` and redraw the progress bar.

        Parameters
        ----------
        task_name : str, optional
            New label for the bar; pass ``increment=0`` to change only the label.
        increment : int
            Number of newly completed tasks; clamped at ``total_tasks``.
        """
        with self.lock:
            if task_name:
                self.current_task = task_name

            if self.completed_tasks < self.total_tasks:
                # Clamp so over-reporting can never push past 100%.
                self.completed_tasks = min(
                    self.completed_tasks + increment, self.total_tasks
                )

                percent = (self.completed_tasks / self.total_tasks) * 100
                elapsed = time.time() - self.start_time
                mins, secs = divmod(elapsed, 60)
                time_str = f"{int(mins):02d}:{int(secs):02d}"

                # Naive linear ETA based on average time per completed task.
                if self.completed_tasks > 0:
                    remaining = (elapsed / self.completed_tasks) * (self.total_tasks - self.completed_tasks)
                    r_mins, r_secs = divmod(remaining, 60)
                    remain_str = f"{int(r_mins):02d}:{int(r_secs):02d}"
                else:
                    remain_str = "计算中..."

                bar_length = 40
                filled_length = int(bar_length * self.completed_tasks // self.total_tasks)
                bar = '█' * filled_length + '░' * (bar_length - filled_length)

                sys.stdout.write(
                    f"\r[任务进度] {self.current_task} |{bar}| {percent:.1f}% "
                    f"({self.completed_tasks}/{self.total_tasks}) "
                    f"[耗时: {time_str}] [剩余: {remain_str}]"
                )
                sys.stdout.flush()

                # Batch finished: terminate the in-place line with a newline.
                if self.completed_tasks >= self.total_tasks:
                    print()


# Module-level tracker shared by all worker threads (used by parallel_process).
progress = ProgressTracker()


# ================== 控制台输出增强函数 ==================
def console_output(message, level="info"):
    """Print *message* with a timestamp and mirror it to the log file."""
    stamped = f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] {message}"
    print(stamped)
    # Route to the matching logging function; anything unknown logs as info.
    log_fn = {"error": logging.error, "warning": logging.warning}.get(level, logging.info)
    log_fn(message)


# ================== DeepSeek API configuration ==================
# SECURITY: an API secret should not live in source control.  Prefer the
# DEEPSEEK_API_KEY environment variable; the hard-coded value remains only
# as a backward-compatible fallback and should be rotated/removed.
API_KEY = os.environ.get("DEEPSEEK_API_KEY", "sk-cca6d98a50674bc9a1733f1b30a955e8")
API_URL = "https://api.deepseek.com/chat/completions"


# ================== API调用函数 ==================
def call_deepseek_api(prompt, content, max_retries=3, cache=True):
    """Call the DeepSeek chat API and return the reply text.

    Replies are cached on disk (keyed by an MD5 of prompt+content) so
    re-analysing the same text does not hit the API again.

    Parameters
    ----------
    prompt : str
        System prompt describing the analysis task.
    content : str
        User text to analyse.
    max_retries : int
        Attempts before giving up (exponential backoff of 3**attempt seconds).
    cache : bool
        Whether to read and write the on-disk cache under ``cache/``.

    Returns
    -------
    str
        The model reply, or "" after all retries fail.
    """
    query = f"{prompt}\n\n{content}"
    cache_key = hashlib.md5(query.encode('utf-8')).hexdigest() + ".json"
    cache_path = os.path.join("cache", cache_key)

    # Cache lookup.  Bug fix: a corrupt or truncated cache file used to
    # crash the whole run; it is now treated as a cache miss.
    if cache and os.path.exists(cache_path):
        try:
            with open(cache_path, 'r', encoding='utf-8') as f:
                return json.load(f)['response']
        except (json.JSONDecodeError, KeyError, OSError):
            pass  # fall through and re-query the API

    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "system", "content": prompt},
            {"role": "user", "content": content}
        ],
        "temperature": 0.3,
        "max_tokens": 128
    }

    for attempt in range(max_retries):
        try:
            response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
            response.raise_for_status()
            result = response.json()

            if "choices" in result and len(result["choices"]) > 0:
                response_text = result["choices"][0]["message"]["content"].strip()

                # Bug fix: the cache was written even when cache=False.
                if cache:
                    with open(cache_path, 'w', encoding='utf-8') as f:
                        json.dump({"prompt": prompt, "content": content, "response": response_text},
                                  f, ensure_ascii=False)

                return response_text

            # Malformed body (no choices): back off and retry like an error.
            # Previously this looped again immediately with no sleep.
            time.sleep(3 ** attempt)
        except requests.exceptions.RequestException as e:
            # Bug fix: network/HTTP errors were swallowed silently before.
            console_output(f"[API] 请求异常: {str(e)}", "error")
            time.sleep(3 ** attempt)
        except Exception as e:
            console_output(f"[API] 请求异常: {str(e)}", "error")
            time.sleep(3 ** attempt)

    return ""


# ================== 情感分析函数 ==================
def analyze_sentiment_with_deepseek(text):
    """Classify *text* into one of three sentiments: 积极 / 中立 / 消极.

    Texts that are empty, None, or shorter than 10 characters are treated
    as neutral without an API call.

    Bug fix: the fallback label used to be "中性", which is not one of the
    three labels the prompt defines ("中立") and silently introduced a
    fourth sentiment category in downstream statistics and charts.  The
    fallback is now "中立".
    """
    if not text or len(text) < 10:
        return "中立"

    prompt = (
        "作为社交媒体舆情分析师，请判断以下文本的情感倾向：\n"
        "选项：【积极】表达支持、喜爱、赞赏等正面情绪\n"
        "     【中立】陈述事实、信息传达无情感倾向\n"
        "     【消极】表达反对、批评、不满等负面情绪\n"
        "要求：仅返回一个情感标签，不要解释"
    )

    result = call_deepseek_api(prompt, text)

    # Map the free-form reply onto the first canonical label it mentions.
    for label in ("积极", "中立", "消极"):
        if label in result:
            return label
    return "中立"


# ================== 主题分类函数 ==================
def classify_topic_with_deepseek(text):
    """Assign one of five fixed topics to a social-media post via DeepSeek."""
    # Too short (or missing) to classify meaningfully: default bucket.
    if not text or len(text) < 10:
        return "其他"

    prompt = (
        "请将以下社交媒体内容分类到最相关的主题：\n"
        "1. 粉丝支持：表达喜爱、支持或崇拜\n"
        "2. 行为争议：讨论不当行为、争议事件\n"
        "3. 文化冲击：涉及文化差异、文化体验\n"
        "4. 商业影响：商业合作、广告推广相关内容\n"
        "5. 其他：不符合以上任何类别\n"
        "要求：仅返回主题名称（如'粉丝支持'）"
    )

    reply = call_deepseek_api(prompt, text)

    # First recognised topic mentioned in the reply wins; otherwise "其他".
    recognised = ("粉丝支持", "行为争议", "文化冲击", "商业影响", "其他")
    return next((topic for topic in recognised if topic in reply), "其他")


# ================== 关键词提取函数 ==================
def extract_keywords_with_deepseek(text):
    """Ask DeepSeek for 3-5 keywords; returns a 顿号-separated string ("" on failure)."""
    # Nothing worth extracting from missing or very short text.
    if not text or len(text) < 10:
        return ""

    prompt = (
        "作为文本分析师，请从以下内容中提取3-5个最重要的关键词\n"
        "要求：\n"
        "1. 只返回关键词，用中文顿号分隔\n"
        "2. 关键词应为有实际意义的名词或名词短语\n"
        "3. 不要解释或添加其他内容"
    )

    raw_reply = call_deepseek_api(prompt, text)

    # Keep only word characters, CJK ideographs and the 顿号 separator.
    return re.sub(r'[^\w\u4e00-\u9fa5、]', '', raw_reply)


# ================== 数据清洗函数 ==================
def clean_text(text):
    """Normalise a raw post: drop HTML tags and URLs, unwrap hashtags, collapse whitespace.

    Non-string input yields "".  The result is capped at 1000 characters.
    """
    if not isinstance(text, str):
        return ""

    # Replace HTML tags and URLs with spaces, then keep the hashtag word
    # without its leading '#', then squash runs of whitespace.
    without_markup = re.sub(r'<[^>]+>|http\S+', ' ', text)
    without_hashes = re.sub(r'#(\S+)', r'\1', without_markup)
    normalised = re.sub(r'\s+', ' ', without_hashes).strip()

    return normalised[:1000]


# ================== 关键词合并与标准化 ==================
def merge_and_standardize_keywords(keywords_list):
    """Normalise keyword spelling variants to a canonical form and count them.

    Returns a Counter mapping canonical keyword -> occurrence count.
    """
    # Variant -> canonical spelling.  Keywords not listed pass through
    # unchanged (handled by dict.get's default below).
    canonical = {
        "重庆话": "重庆方言",
        "金箍": "金箍棒",
        "辣": "辣椒",
        "回复": "互动",
        "粉丝": "粉丝支持",
        "文化冲击": "文化差异",
        "行为争议": "争议事件",
        "欧冠": "欧洲冠军联赛",
        "中国旅游": "中国行",
    }

    return Counter(canonical.get(kw, kw) for kw in keywords_list)


# ================== 并行处理函数 ==================
def parallel_process(df, func, task_name, max_workers=6):
    """Apply *func* to every row's '清洗文本' value using a thread pool.

    Parameters
    ----------
    df : pandas.DataFrame   must contain a '清洗文本' column.
    func : callable         text -> result, run once per row.
    task_name : str         label shown in the progress bar.
    max_workers : int       thread-pool size.

    Returns
    -------
    list : results aligned with the DataFrame's positional row order;
           rows whose task raised become "".

    Bug fix: the start/finish banner updates previously used the default
    ``increment=1`` and therefore consumed one progress tick each, making
    the bar over-count by up to two tasks.  They now pass ``increment=0``
    so only the label changes.
    """
    total_rows = len(df)
    progress.reset(total_tasks=total_rows)
    progress.update(task_name=f"{task_name} - 开始", increment=0)

    results = [""] * total_rows

    try:
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit one task per row, remembering the row position so the
            # output list stays aligned with the DataFrame.
            futures = [
                (i, executor.submit(func, df.iloc[i]['清洗文本']))
                for i in range(total_rows)
            ]

            # Collect in submission order; result() blocks until each is done.
            for i, future in futures:
                try:
                    results[i] = future.result()
                    progress.update(increment=1)
                except Exception as e:
                    console_output(f"处理失败: {e}", "error")
                    results[i] = ""
    except Exception as e:
        console_output(f"并行处理失败: {e}", "error")

    progress.update(task_name=f"{task_name} - 完成", increment=0)
    return results


# ================== 可视化函数 ==================
def create_visualizations(df, platform_name):
    """Generate per-platform charts: sentiment pie, topic bars, keyword bars, word cloud.

    Expects columns 'sentiment', 'topic' and 'keywords' (顿号-separated
    strings).  Images are written under visualizations/.
    """
    console_output(f"[可视化] 开始为 {platform_name} 生成图表")
    plt.style.use('ggplot')

    try:
        # 1. Sentiment pie chart.
        plt.figure(figsize=(10, 7))
        sentiment_counts = df['sentiment'].value_counts()
        labels = sentiment_counts.index.tolist()

        # Bug fix: colours used to be assigned by frequency rank, so e.g.
        # "积极" could be drawn red whenever it was not the most common
        # label.  Map each sentiment to its own fixed colour instead
        # (green=positive, orange=neutral, red=negative, grey=unknown).
        color_map = {'积极': '#4caf50', '中立': '#ff9800', '消极': '#f44336'}
        colors = [color_map.get(label, '#9e9e9e') for label in labels]
        plt.pie(sentiment_counts, labels=labels, colors=colors,
                autopct=lambda pct: f'{pct:.1f}%', startangle=90)
        plt.title(f'{platform_name}情感分布', fontsize=14)
        plt.savefig(f'visualizations/{platform_name}_情感分布.png', bbox_inches='tight', dpi=300)
        plt.close()

        # 2. Topic distribution bar chart.
        plt.figure(figsize=(12, 6))
        topic_counts = df['topic'].value_counts()

        ax = topic_counts.plot.barh(color='#2196f3', alpha=0.8)
        ax.set_title(f'{platform_name}主题分布', fontsize=14)
        ax.set_xlabel('评论数量', fontsize=12)

        # Annotate each bar with its count, nudged just past the bar end.
        for i, v in enumerate(topic_counts):
            ax.text(v + max(topic_counts) * 0.01, i - 0.1, str(v), va='center', fontsize=10)

        plt.tight_layout()
        plt.savefig(f'visualizations/{platform_name}_主题分布.png', bbox_inches='tight', dpi=300)
        plt.close()

        # 3. Keyword analysis: flatten the 顿号-separated keyword strings.
        all_keywords = []
        for keywords in df['keywords']:
            if isinstance(keywords, str):
                parts = keywords.split('、')
                all_keywords.extend([kw.strip() for kw in parts if kw.strip()])

        if not all_keywords:
            console_output("[可视化] 无有效关键词数据，跳过关键词图表生成", "warning")
            return

        console_output(f"[关键词] 原始关键词数量: {len(all_keywords)}")
        console_output(f"[关键词] 前10个原始关键词: {all_keywords[:10]}")

        # Merge spelling variants while keeping frequencies.
        merged_counter = merge_and_standardize_keywords(all_keywords)

        # Drop common Chinese stopwords.
        chinese_stopwords = {"的", "了", "和", "是", "在", "就", "也", "与", "这个", "那个", "这样", "没有", "不是",
                             "什么", "如何"}
        filtered_counter = Counter()
        for kw, count in merged_counter.items():
            if kw not in chinese_stopwords:
                filtered_counter[kw] = count

        if filtered_counter:
            console_output(f"[关键词] 过滤后关键词数量: {len(filtered_counter)}")
            console_output(f"[关键词] 过滤后最高频关键词: {filtered_counter.most_common(5)}")

            top_keywords = filtered_counter.most_common(20)
            words, counts = zip(*top_keywords) if top_keywords else ([], [])

            # TOP-20 keyword frequency bars.
            plt.figure(figsize=(12, 8))
            plt.barh(words, counts, color='#9c27b0')
            plt.title(f'{platform_name}TOP20高频关键词', fontsize=14)
            plt.xlabel('出现频次', fontsize=12)

            max_count = max(counts) if counts else 1
            for index, value in enumerate(counts):
                plt.text(value + max_count * 0.01, index, str(value), va='center', fontsize=10)

            plt.tight_layout()
            plt.savefig(f'visualizations/{platform_name}_关键词词频.png', bbox_inches='tight', dpi=300)
            plt.close()

            # 4. Word cloud built from the same frequency table.
            try:
                wordcloud = WordCloud(
                    # SimHei font file is expected on Windows only; elsewhere
                    # WordCloud falls back to its default font.
                    font_path='simhei.ttf' if os.name == 'nt' else None,
                    width=1200,
                    height=800,
                    background_color='white',
                    max_words=100,
                    colormap='viridis',
                    prefer_horizontal=0.9
                ).generate_from_frequencies(filtered_counter)

                plt.figure(figsize=(15, 10))
                plt.imshow(wordcloud, interpolation='bilinear')
                plt.axis('off')
                plt.title(f'{platform_name}舆情关键词词云', fontsize=16)
                plt.savefig(f'visualizations/{platform_name}_词云.png', bbox_inches='tight', dpi=300)
                plt.close()
            except Exception as e:
                console_output(f"[可视化] 生成词云失败: {e}", "error")
        else:
            console_output("[可视化] 过滤后无有效关键词数据，跳过关键词图表生成", "warning")

    except Exception as e:
        console_output(f"[可视化] 生成失败: {e}", "error")
    finally:
        console_output(f"[可视化] {platform_name}图表生成完成")


# ================== 平台分析函数 ==================
def analyze_platform(data_path, platform_name, content_column):
    """Run the full pipeline for one platform: load, clean, analyse, chart, save.

    Parameters
    ----------
    data_path : str        path to an .xlsx or .csv export.
    platform_name : str    label used in logs, charts and output filenames.
    content_column : str   column holding the raw post text.

    Returns
    -------
    pandas.DataFrame or None
        The annotated DataFrame, or None on failure / no usable data.
    """
    console_output("=" * 50)
    console_output(f"开始分析 {platform_name} 数据")
    console_output(f"数据文件: {data_path}")
    console_output(f"内容列: {content_column}")

    try:
        # 1. Load (.xlsx or .csv; retry CSV with GBK for legacy exports).
        console_output("加载数据...")
        if data_path.endswith('.xlsx'):
            df = pd.read_excel(data_path)
        elif data_path.endswith('.csv'):
            try:
                df = pd.read_csv(data_path)
            except UnicodeDecodeError:
                # Bug fix: was a bare except that also hid parse/IO errors;
                # only an encoding failure justifies the GBK retry.
                df = pd.read_csv(data_path, encoding='gbk')
        else:
            raise ValueError(f"不支持的文件格式: {data_path}")

        console_output(f"加载数据: {len(df)}条")

        # 2. Gentle text cleaning; drop rows that become empty.
        console_output("清洗文本数据...")
        df['清洗文本'] = df[content_column].apply(clean_text)
        # .copy() so the column assignments below write to a real frame,
        # not a view (avoids pandas SettingWithCopyWarning).
        df = df[df['清洗文本'] != ''].copy()

        console_output(f"清洗后有效数据: {len(df)}条")

        if df.empty:
            console_output(f"清洗后无有效数据，跳过分析", "error")
            return None

        # 3. Parallel pass: sentiment.
        console_output("开始情感分析...")
        df['sentiment'] = parallel_process(df, analyze_sentiment_with_deepseek, "情感分析", max_workers=8)

        # 4. Parallel pass: topic.
        console_output("开始主题分类...")
        df['topic'] = parallel_process(df, classify_topic_with_deepseek, "主题分类", max_workers=8)

        # 5. Parallel pass: keywords.
        console_output("开始关键词提取...")
        df['keywords'] = parallel_process(df, extract_keywords_with_deepseek, "关键词提取", max_workers=8)

        # 6. Charts.
        console_output("生成可视化图表...")
        create_visualizations(df, platform_name)

        # 7. Persist the annotated data.
        output_path = f"results/{platform_name}_分析结果.xlsx"
        df.to_excel(output_path, index=False)
        console_output(f"分析完成! 结果保存到: {output_path}")
        console_output("=" * 50)

        return df

    except Exception as e:
        console_output(f"分析失败: {e}", "error")
        traceback.print_exc()
        return None


# ================== 数据质量分析函数 ==================
def analyze_data_quality(df, platform_name):
    """Summarise one platform's analysed data and write it as a JSON report.

    Returns the report dict, or None when *df* is missing/empty or the
    report cannot be generated.
    """
    if df is None or df.empty:
        return None

    try:
        # Average length of the cleaned text column.
        lengths = [len(text) for text in df['清洗文本']]
        mean_length = (sum(lengths) / len(lengths)) if lengths else 0

        # Cast every value to a plain Python type so json.dump succeeds
        # (pandas/numpy scalars are not JSON serialisable).
        report = {
            "平台": platform_name,
            "总数据量": int(len(df)),
            "有效数据量": int(len(df[df['清洗文本'] != ''])),
            "平均文本长度": float(mean_length),
            "情感分布": {label: int(n) for label, n in df['sentiment'].value_counts().items()},
            "主题分布": {label: int(n) for label, n in df['topic'].value_counts().items()},
        }

        # Flatten the 顿号-separated keyword strings across all rows.
        flat_keywords = []
        for cell in df['keywords']:
            if isinstance(cell, str):
                flat_keywords.extend(kw.strip() for kw in cell.split('、') if kw.strip())

        # TOP-10 keywords after merging spelling variants.
        if flat_keywords:
            counter = merge_and_standardize_keywords(flat_keywords)
            report["TOP10关键词"] = {kw: int(n) for kw, n in counter.most_common(10)}

        # Persist the report next to the other per-platform outputs.
        report_path = f"results/{platform_name}_数据质量报告.json"
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)

        return report

    except Exception as e:
        console_output(f"生成数据质量报告失败: {e}", "error")
        return None


# ================== 跨平台比较 ==================
def create_cross_platform_comparison(results):
    """Build cross-platform comparison charts (sentiment, topic, keywords).

    Parameters
    ----------
    results : dict[str, pandas.DataFrame]
        Platform name -> annotated DataFrame with 'sentiment', 'topic'
        and 'keywords' columns.  Needs at least two non-empty platforms.
    """
    if not results:
        console_output("无结果数据，跳过跨平台比较", "error")
        return

    try:
        console_output("开始生成跨平台比较图表...")

        valid_platforms = [name for name, df in results.items() if not df.empty]
        if len(valid_platforms) < 2:
            console_output(f"只有 {len(valid_platforms)} 个平台有有效数据，跳过跨平台比较", "warning")
            return

        # 1. Sentiment share per platform (percentages).
        sentiment_data = {}
        for platform, df in results.items():
            if not df.empty:
                sentiment_data[platform] = df['sentiment'].value_counts(normalize=True) * 100

        if sentiment_data:
            sentiment_df = pd.DataFrame(sentiment_data).fillna(0)
            # Bug fix: rows used to come out in arbitrary frequency order
            # while the colour list was fixed, so e.g. "消极" could be drawn
            # green.  Reindex to the canonical label order (unexpected
            # labels appended) and derive colours from the labels.
            canonical = [s for s in ('积极', '中立', '消极') if s in sentiment_df.index]
            extras = [s for s in sentiment_df.index if s not in canonical]
            sentiment_df = sentiment_df.reindex(canonical + extras)
            color_map = {'积极': '#4caf50', '中立': '#ff9800', '消极': '#f44336'}
            colors = [color_map.get(s, '#9e9e9e') for s in sentiment_df.index]

            # Bug fix: a preceding plt.figure() call created an extra figure
            # that DataFrame.plot(figsize=...) then abandoned unclosed — a
            # figure leak.  Let pandas create the only figure.
            sentiment_df.T.plot(kind='bar', stacked=True, color=colors, figsize=(12, 6))
            plt.title('各平台情感分布对比', fontsize=14)
            plt.ylabel('百分比 (%)', fontsize=12)
            plt.legend(title='情感', loc='upper right')
            plt.xticks(rotation=0)
            plt.tight_layout()
            plt.savefig('visualizations/跨平台情感对比.png', dpi=300)
            plt.close()
        else:
            console_output("无有效情感数据，跳过情感分布对比", "warning")

        # 2. Topic share per platform.
        topic_data = {}
        for platform, df in results.items():
            if not df.empty:
                topic_data[platform] = df['topic'].value_counts(normalize=True) * 100

        if topic_data:
            topic_df = pd.DataFrame(topic_data).fillna(0)
            # Same figure-leak fix as above: no redundant plt.figure().
            topic_df.T.plot(kind='bar', stacked=False, figsize=(14, 8))
            plt.title('各平台主题分布对比', fontsize=14)
            plt.ylabel('百分比 (%)', fontsize=12)
            plt.legend(title='主题', loc='upper right')
            plt.xticks(rotation=0)
            plt.tight_layout()
            plt.savefig('visualizations/跨平台主题对比.png', dpi=300)
            plt.close()
        else:
            console_output("无有效主题数据，跳过主题分布对比", "warning")

        # 3. Per-platform TOP-10 keyword panels (one subplot per platform).
        all_keywords = {}
        for platform, df in results.items():
            if not df.empty:
                keywords = []
                for kw_str in df['keywords']:
                    if isinstance(kw_str, str):
                        keywords.extend(kw_str.split('、'))

                if keywords:
                    all_keywords[platform] = merge_and_standardize_keywords(keywords)

        if all_keywords:
            fig, axes = plt.subplots(len(all_keywords), 1, figsize=(12, 6 * len(all_keywords)))
            if len(all_keywords) == 1:
                axes = [axes]  # plt.subplots returns a bare Axes for a single row

            for i, (platform, counter) in enumerate(all_keywords.items()):
                top_keywords = counter.most_common(10)
                if not top_keywords:
                    continue

                words, counts = zip(*top_keywords)

                ax = axes[i]
                y_pos = np.arange(len(words))
                ax.barh(y_pos, counts, color='#2196f3', alpha=0.8)
                ax.set_yticks(y_pos)
                ax.set_yticklabels(words)
                ax.invert_yaxis()  # highest frequency on top
                ax.set_xlabel('出现频次')
                ax.set_title(f'{platform} TOP10关键词')

            plt.tight_layout()
            plt.savefig('visualizations/跨平台关键词对比.png', dpi=300)
            plt.close()
        else:
            console_output("无有效关键词数据，跳过关键词对比", "warning")

        console_output("跨平台比较图表生成完成")

    except Exception as e:
        console_output(f"创建跨平台比较失败: {e}", "error")


# ================== 主执行流程 ==================
def main():
    """Entry point: analyse each configured platform, then compare across platforms.

    Expects the platform data files in the current working directory;
    missing files are reported and skipped.  Fix: dropped the dead
    ``global progress`` declaration — *progress* is never rebound here.
    """
    start_time = time.time()
    console_output("=" * 80)
    console_output(f"舆情分析系统启动 - {time.strftime('%Y-%m-%d %H:%M:%S')}")
    console_output("=" * 80)
    console_output(f"当前工作目录: {os.getcwd()}")
    console_output(f"处理器数量: {multiprocessing.cpu_count()}")

    try:
        # Platform name -> (data file, column holding the post text).
        platforms = {
            "微博": ("微博搜索关键词采集.xlsx", "博文内容"),
            "B站": ("B站单个视频一二级评论采集-限云采集.xlsx", "评论内容"),
            "知乎": ("知乎关键词搜索问答采集.csv", "回答内容")
        }

        # Keep only the platforms whose data file actually exists.
        valid_files = {}
        for platform, (data_path, col) in platforms.items():
            if os.path.exists(data_path):
                console_output(f"  √ {platform}: 文件存在 - {os.path.abspath(data_path)}")
                valid_files[platform] = (data_path, col)
            else:
                console_output(f"  × {platform}: 文件不存在 - {os.path.abspath(data_path)}", "error")

        if not valid_files:
            console_output("无有效数据文件，程序终止", "error")
            return

        results = {}
        quality_reports = {}

        # Run the full pipeline for each platform with data.
        for platform_name, (data_path, content_column) in valid_files.items():
            df = analyze_platform(data_path, platform_name, content_column)
            if df is not None:
                results[platform_name] = df
                quality_reports[platform_name] = analyze_data_quality(df, platform_name)

        # Cross-platform charts (internally requires >= 2 platforms).
        if results:
            create_cross_platform_comparison(results)

        # Console summary table.
        console_output("\n" + "=" * 80)
        console_output("最终分析报告摘要:")
        console_output(
            f"{'平台':<6} | {'总数据':<6} | {'有效数据':<6} | {'积极率':<6} | {'主导主题':<12} | {'主要关键词'}")
        console_output("-" * 80)

        for platform, report in quality_reports.items():
            if not report:
                continue

            # Share of positive posts.
            sentiments = report.get("情感分布", {})
            total = sum(sentiments.values())
            positive_rate = sentiments.get("积极", 0) / total * 100 if total > 0 else 0

            # Most frequent topic.
            topics = report.get("主题分布", {})
            main_topic = max(topics, key=topics.get) if topics else "未知"

            # Top three keywords.
            keywords = report.get("TOP10关键词", {})
            main_keywords = ", ".join(list(keywords.keys())[:3]) if keywords else "无"

            console_output(f"{platform:<6} | {report['总数据量']:<6} | {report['有效数据量']:<6} | "
                           f"{positive_rate:.1f}%{'':<2} | {main_topic:<12} | {main_keywords}")

        console_output("=" * 80)

        # Total runtime.
        elapsed_time = time.time() - start_time
        mins, secs = divmod(elapsed_time, 60)
        console_output(f"舆情分析成功完成! 总计耗时: {int(mins)}分{int(secs)}秒")
        console_output("=" * 80)

    except Exception as e:
        console_output(f"主程序异常: {e}", "error")
        traceback.print_exc()
    finally:
        # Keep the PyCharm run window open so the summary can be read.
        if 'PYCHARM_HOSTED' in os.environ:
            input("\n按Enter键退出...")

# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()