import csv
import datetime
import html
import json
import logging
import os
import traceback
from pathlib import Path

import nltk
import numpy as np
from nltk.tokenize import sent_tokenize, word_tokenize
from transformers import pipeline

# Configure logging (timestamp - level - message)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class ContentAnalyzer:
    """Analyze video/interview transcripts for job relevance and quality.

    Combines a Hugging Face sentiment-analysis pipeline with NLTK
    tokenization and POS tagging to compute content metrics, and can
    batch-process a folder of transcripts into JSON/HTML/CSV reports.
    """

    def __init__(self, model_name="distilbert-base-uncased-finetuned-sst-2-english"):
        """
        Initialize the content analyzer.

        Args:
            model_name (str): Hugging Face sentiment-analysis model name.

        Raises:
            Exception: re-raised after logging if NLTK data setup or
                pipeline construction fails.
        """
        try:
            # Make all required NLTK resources available before first use.
            self.ensure_nltk_data_available()
            self.sentiment_analyzer = pipeline("sentiment-analysis", model=model_name)
            logging.info("ContentAnalyzer initialized successfully.")
        except Exception as e:
            logging.error(f"Failed to initialize ContentAnalyzer: {e}")
            raise

    def ensure_nltk_data_available(self):
        """Ensure required NLTK data packages exist, downloading if missing.

        Data is kept in a project-local ``nltk_data`` directory so the
        application does not depend on a user-level NLTK installation.
        Falls back to logging manual-install instructions when a download
        fails.
        """
        # Point NLTK at an nltk_data folder next to this source file.
        nltk_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "nltk_data")
        os.makedirs(nltk_data_dir, exist_ok=True)
        os.environ['NLTK_DATA'] = nltk_data_dir
        nltk.data.path.insert(0, nltk_data_dir)

        # Package name -> nltk.data.find() lookup path.
        required_packages = {
            'punkt': 'tokenizers/punkt',                                         # tokenization
            'averaged_perceptron_tagger': 'taggers/averaged_perceptron_tagger',  # POS tagging
            'vader_lexicon': 'sentiment/vader_lexicon',                          # sentiment
        }

        for package, lookup_path in required_packages.items():
            try:
                nltk.data.find(lookup_path)
                logging.info(f"NLTK数据包 {package} 已存在")
            except LookupError:
                try:
                    logging.info(f"正在下载NLTK数据包 {package}...")
                    # nltk.download() swallows download errors and returns
                    # False instead of raising, so the original try/except
                    # never triggered the manual fallback; check the result.
                    if not nltk.download(package, download_dir=nltk_data_dir, quiet=True):
                        raise RuntimeError("nltk.download() returned False")
                    logging.info(f"NLTK数据包 {package} 下载成功")
                except Exception as e:
                    logging.error(f"无法下载NLTK数据包 {package}: {str(e)}")
                    self.download_nltk_data_manually(package, nltk_data_dir)

    def download_nltk_data_manually(self, package, nltk_data_dir):
        """Log step-by-step instructions for installing NLTK data offline."""
        logging.warning(f"""
        无法自动下载NLTK数据包 {package}。请按照以下步骤手动安装:
        
        1. 在有网络的电脑上，运行Python并执行:
           import nltk
           nltk.download('{package}')
           
        2. 找到NLTK数据目录(通常在用户目录下的nltk_data文件夹)
        
        3. 将数据复制到: {nltk_data_dir}
           - 对于punkt: 复制tokenizers/punkt到{nltk_data_dir}/tokenizers/
           - 对于averaged_perceptron_tagger: 复制taggers/averaged_perceptron_tagger到{nltk_data_dir}/taggers/
           - 对于vader_lexicon: 复制sentiment/vader_lexicon到{nltk_data_dir}/sentiment/
           
        4. 重新运行程序
        """)

    def load_transcript(self, file_path):
        """Load transcript text from a JSON or plain-text file.

        Args:
            file_path (str | Path): path to the transcript file. For
                ``.json`` files, the value under the ``transcript`` key is
                returned when present; otherwise the whole document is
                re-serialized as text.

        Returns:
            str: the transcript text, or "" on any failure.
        """
        try:
            file_path = Path(file_path)
            logging.info(f"尝试从 {file_path} 加载转录文本")

            if file_path.suffix.lower() == '.json':
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                if 'transcript' in data:
                    return data['transcript']
                # No 'transcript' key: fall back to the whole JSON as text.
                return json.dumps(data)

            # Plain text file.
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()

        except Exception as e:
            logging.error(f"加载转录文本失败: {e}")
            return ""

    def analyze_transcript(self, transcript, job_keywords=None):
        """Analyze a transcript for job relevance and content quality.

        Args:
            transcript (str): transcript text to analyze.
            job_keywords (list[str] | None): keywords to match
                (case-insensitive); defaults to a built-in media/production
                list.

        Returns:
            dict: metric name -> value (keyword relevance, sentiment
            confidence, clarity, complexity, composite quality, counts).
        """
        job_keywords = job_keywords or [
            "streaming", "content", "media", "production", "digital", "creative"
        ]

        if not transcript or not transcript.strip():
            logging.warning("Empty or invalid transcript provided.")
            return self._default_metrics()

        transcript_lower = transcript.lower()
        matched_keywords = [k for k in job_keywords if k.lower() in transcript_lower]
        keyword_relevance = len(matched_keywords) / len(job_keywords) if job_keywords else 0

        # Sentiment on a bounded prefix. truncation=True additionally guards
        # against the 512-character prefix exceeding the model's 512-TOKEN
        # limit (characters != tokens), which previously could raise.
        try:
            sentiment_result = self.sentiment_analyzer(transcript[:512], truncation=True)[0]
        except Exception as e:
            logging.error(f"Sentiment analysis error: {e}")
            return self._default_metrics()
        # Fold label+score onto one 0..1 "positive confidence" axis.
        confidence_score = (sentiment_result['score'] if sentiment_result['label'] == 'POSITIVE'
                           else 1.0 - sentiment_result['score'])

        try:
            sentences = sent_tokenize(transcript)
            sentence_count = len(sentences)
            words = word_tokenize(transcript)
            word_count = len(words)
            words_per_sentence = word_count / sentence_count if sentence_count > 0 else 0
        except Exception as e:
            logging.error(f"Tokenization error: {e}")
            return self._default_metrics()

        clarity_score = self._calculate_clarity(words_per_sentence)
        sentence_complexity = self._calculate_complexity(transcript)
        # Weighted composite: clarity dominates, then sentiment, then keywords.
        content_quality = (keyword_relevance * 0.2) + (confidence_score * 0.3) + (clarity_score * 0.5)

        return {
            'keyword_relevance': round(keyword_relevance, 2),
            'matched_keywords': matched_keywords,
            'confidence': round(confidence_score, 2),
            'clarity': round(clarity_score, 2),
            'sentence_complexity': round(sentence_complexity, 2),
            'content_quality': round(content_quality, 2),
            'sentence_count': sentence_count,
            'word_count': word_count,
            'words_per_sentence': round(words_per_sentence, 2)
        }

    def _default_metrics(self):
        """Return the all-zero metrics dict used for empty/failed analyses."""
        return {
            'keyword_relevance': 0.0,
            'confidence': 0.0,
            'clarity': 0.0,
            'sentence_complexity': 0.0,
            'content_quality': 0.0,
            'matched_keywords': [],
            'sentence_count': 0,
            'word_count': 0,
            'words_per_sentence': 0.0
        }

    def _calculate_clarity(self, words_per_sentence):
        """Score clarity in [0, 1] from average sentence length.

        5-20 words/sentence is treated as ideal (1.0); very short sentences
        score 0.5; longer sentences decay linearly down to a floor of 0.3.
        """
        if words_per_sentence == 0:
            return 0.0
        elif words_per_sentence < 5:
            return 0.5
        elif words_per_sentence <= 20:
            return 1.0
        else:
            return max(0.3, 1.0 - (words_per_sentence - 20) / 30)

    def _calculate_complexity(self, transcript):
        """Score complexity in [0, 1] as the scaled noun+verb word ratio.

        Returns 0.0 if POS tagging fails (e.g. tagger data unavailable).
        """
        try:
            words = word_tokenize(transcript)
            tagged = nltk.pos_tag(words)
            noun_verb_count = len([w for w, pos in tagged if pos.startswith(('NN', 'VB'))])
            total_words = len(words)
            complexity = noun_verb_count / total_words if total_words > 0 else 0
            # Scale by 1.5 so typical ratios use the full range, capped at 1.0.
            return min(1.0, complexity * 1.5)
        except Exception as e:
            logging.error(f"Complexity calculation error: {e}")
            return 0.0

    def _generate_html_report(self, metrics, video_name, output_folder):
        """Write an HTML report of the metrics; return the file path as str."""
        # Escape values interpolated into markup so arbitrary folder/file
        # names cannot inject HTML; all other cell values are numbers.
        safe_name = html.escape(str(video_name))
        safe_keywords = html.escape(', '.join(metrics['matched_keywords']))
        html_content = f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="UTF-8">
            <title>内容分析报告 - {safe_name}</title>
            <style>
                body {{ font-family: Arial, sans-serif; margin: 20px; }}
                h1 {{ color: #333; }}
                table {{ border-collapse: collapse; width: 80%; }}
                th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
                th {{ background-color: #f2f2f2; }}
            </style>
        </head>
        <body>
            <h1>内容分析报告 - {safe_name}</h1>
            <table>
                <tr><th>指标</th><th>值</th></tr>
                <tr><td>关键词相关性</td><td>{metrics['keyword_relevance']}</td></tr>
                <tr><td>匹配的关键词</td><td>{safe_keywords}</td></tr>
                <tr><td>信心评分</td><td>{metrics['confidence']}</td></tr>
                <tr><td>清晰度</td><td>{metrics['clarity']}</td></tr>
                <tr><td>句子复杂度</td><td>{metrics['sentence_complexity']}</td></tr>
                <tr><td>内容质量</td><td>{metrics['content_quality']}</td></tr>
                <tr><td>句子数</td><td>{metrics['sentence_count']}</td></tr>
                <tr><td>词数</td><td>{metrics['word_count']}</td></tr>
                <tr><td>每句平均词数</td><td>{metrics['words_per_sentence']}</td></tr>
            </table>
            <p>生成时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
        </body>
        </html>
        """
        html_file = output_folder / "analysis.html"
        with html_file.open('w', encoding='utf-8') as f:
            f.write(html_content)
        return str(html_file)

    def _generate_csv_report(self, metrics, video_name, output_folder):
        """Write a two-column Metric/Value CSV; return the file path as str."""
        csv_file = output_folder / "analysis.csv"
        headers = ['Metric', 'Value']
        rows = [
            ['Keyword Relevance', metrics['keyword_relevance']],
            ['Matched Keywords', ','.join(metrics['matched_keywords'])],
            ['Confidence', metrics['confidence']],
            ['Clarity', metrics['clarity']],
            ['Sentence Complexity', metrics['sentence_complexity']],
            ['Content Quality', metrics['content_quality']],
            ['Sentence Count', metrics['sentence_count']],
            ['Word Count', metrics['word_count']],
            ['Words Per Sentence', metrics['words_per_sentence']]
        ]
        with csv_file.open('w', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(headers)
            writer.writerows(rows)
        return str(csv_file)

    def _analyze_and_save(self, transcript_file, video_id, analysis_path, job_keywords):
        """Analyze one transcript file and write its JSON/HTML/CSV reports.

        Shared by both folder layouts in process_folder (previously the
        same ~30 lines were duplicated in each branch).

        Returns:
            dict: report file paths plus the computed 'metrics' dict.
        """
        transcript = self.load_transcript(transcript_file)

        # An empty transcript is still reported, using default metrics.
        if not transcript:
            logging.warning(f"{transcript_file} 中转录内容为空，使用默认分析指标")
            analysis_result = self._default_metrics()
        else:
            analysis_result = self.analyze_transcript(transcript, job_keywords)

        video_analysis_folder = analysis_path / video_id
        if not video_analysis_folder.exists():
            # exist_ok avoids a race between the exists() check and creation.
            video_analysis_folder.mkdir(parents=True, exist_ok=True)
            logging.info(f"Created video analysis folder: {video_analysis_folder}")

        analysis_json_file = video_analysis_folder / "analysis.json"
        with analysis_json_file.open('w', encoding='utf-8') as f:
            json.dump(analysis_result, f, ensure_ascii=False, indent=2)

        html_file = self._generate_html_report(analysis_result, video_id, video_analysis_folder)
        csv_file = self._generate_csv_report(analysis_result, video_id, video_analysis_folder)

        return {
            'transcript_file': str(transcript_file),
            'analysis_json_file': str(analysis_json_file),
            'html_report': html_file,
            'csv_report': csv_file,
            'metrics': analysis_result
        }

    def process_folder(self, transcripts_folder="transcripts", analysis_folder="analysis_results", job_keywords=None):
        """Process every transcript under transcripts_folder into reports.

        Supports two layouts: one subfolder per video containing
        transcript.txt, or (if no subfolders exist) a flat folder of
        ``<video_id>.json`` files.

        Args:
            transcripts_folder (str): input folder of transcripts.
            analysis_folder (str): output folder for per-video reports.
            job_keywords (list[str] | None): forwarded to analyze_transcript.

        Returns:
            dict: video id -> report paths and metrics; empty if the
            input folder does not exist.
        """
        results = {}
        transcripts_path = Path(transcripts_folder)
        analysis_path = Path(analysis_folder)

        if not analysis_path.exists():
            analysis_path.mkdir(parents=True, exist_ok=True)
            logging.info(f"Created analysis folder: {analysis_folder}")

        if not transcripts_path.exists():
            logging.error(f"Transcripts folder {transcripts_folder} does not exist.")
            return results

        # Layout 1: one subfolder per video, each holding transcript.txt.
        subfolder_found = False
        for subfolder in transcripts_path.iterdir():
            if not subfolder.is_dir():
                continue
            subfolder_found = True
            transcript_file = subfolder / "transcript.txt"
            if not transcript_file.exists():
                logging.warning(f"No transcript.txt found in {subfolder}")
                continue
            try:
                results[subfolder.name] = self._analyze_and_save(
                    transcript_file, subfolder.name, analysis_path, job_keywords)
                logging.info(f"Processed transcript from {transcript_file}")
            except Exception as e:
                logging.error(f"Error processing transcript {transcript_file}: {e}")

        # Layout 2: no subfolders found -- process *.json files directly.
        if not subfolder_found:
            logging.info(f"没有找到子文件夹结构，直接处理{transcripts_folder}中的JSON文件")
            for file_path in transcripts_path.glob("*.json"):
                try:
                    # Use the file name (without extension) as the video id.
                    video_id = file_path.stem
                    results[video_id] = self._analyze_and_save(
                        file_path, video_id, analysis_path, job_keywords)
                    logging.info(f"处理来自 {file_path} 的转录")
                except Exception as e:
                    logging.error(f"处理转录文件 {file_path} 时出错: {e}")
                    logging.error(traceback.format_exc())

        return results

if __name__ == "__main__":
    # Run the batch analysis over the default folders with a custom keyword set.
    analyzer = ContentAnalyzer()
    selected_keywords = ["streaming", "media", "production"]
    summary = analyzer.process_folder(
        "transcripts", "analysis_results", job_keywords=selected_keywords
    )

    # Print a per-video summary of the generated artifacts and metrics.
    report_fields = [
        ("转录文件", "transcript_file"),
        ("JSON分析文件", "analysis_json_file"),
        ("HTML报告", "html_report"),
        ("CSV报告", "csv_report"),
        ("指标", "metrics"),
    ]
    print("\n分析结果汇总:")
    for name, entry in summary.items():
        print(f"视频: {name}")
        for label, key in report_fields:
            print(f"  {label}: {entry[key]}")
        print()