"""
Qwen Ablation Experiment System - analyzes the impact of nine prompt directives
using three readability metrics and two relevance metrics (efficient implementation).

Local environments (PyCharm or similar):
pip install openai textstat rouge-score pandas numpy matplotlib seaborn tqdm bert-score loguru huggingface_hub[hf_xet]
if kaggle:
!pip install openai textstat rouge-score pandas numpy matplotlib seaborn tqdm bert-score loguru huggingface_hub[hf_xet]
if colab:
!pip install openai textstat rouge-score pandas numpy matplotlib seaborn tqdm bert-score loguru huggingface_hub[hf_xet]
"""
from transformers import logging
logging.set_verbosity_warning()
logging.set_verbosity_error()
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import os
import json
import pandas as pd
import numpy as np
from datetime import datetime
import time
from typing import List, Dict, Optional
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from openai import OpenAI
import textstat
from rouge_score import rouge_scorer
from loguru import logger

# Optional BERTScore: bert-score is heavyweight, so it is imported lazily and
# BERT_AVAILABLE gates its use in FastEvaluator.evaluate below.
try:
    from bert_score import score as compute_bert_score
    BERT_AVAILABLE = True
except ImportError:
    BERT_AVAILABLE = False
    logger.warning("BERTScore unavailable - using ROUGE only")

# Set matplotlib font to avoid font issues (missing-glyph warnings and
# broken minus signs on some backends, e.g. Kaggle/Colab).
plt.rcParams['font.sans-serif'] = ['Arial', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

class AblationConfig:
    """Configuration specific to the ablation experiment.

    Holds API credentials, sampling parameters, the nine autism-friendly
    writing rules under study, and I/O paths (Kaggle defaults).
    """
    # SECURITY: prefer the DASHSCOPE_API_KEY environment variable over the
    # hard-coded literal; the literal is kept only as a backward-compatible
    # fallback and should be rotated/removed before sharing this file.
    API_KEY = os.environ.get("DASHSCOPE_API_KEY", "sk-92bb68e965964fadb567f63dc71c5fea")
    BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    MODEL_NAME = "qwen-plus"
    # Number of questions evaluated per experiment condition.
    SAMPLE_SIZE = 10
    # NOTE(review): MAX_RETRIES is not referenced anywhere in this file —
    # FastQwenClient makes a single attempt; wire this up or drop it.
    MAX_RETRIES = 2
    # The nine directives ablated one at a time (rule_id -> instruction text).
    AUTISM_RULES = {
        1: "Give ACCURATE, factual information first - never guess or speculate",
        2: "Use simple, common words (avoid technical, complex, or academic terms)",
        3: "Write EXTREMELY short sentences (MAXIMUM 7 words each - count carefully!)",
        4: "Be direct and clear - no metaphors or figures of speech",
        5: "Give concrete, specific information with examples when helpful",
        6: "Keep total answer under 35 words total",
        7: "End EVERY sentence with proper punctuation.",
        8: "Use simple present tense when possible",
        9: "Avoid passive voice - use active voice only"
    }
    TSV_FILE = "/kaggle/input/dddddd/question_train_answerable.tsv"
    OUTPUT_DIR = "/kaggle/working/"

# Single shared configuration instance used throughout this module.
config = AblationConfig()
logger.info(f"Ablation Experiment System - Sample size: {config.SAMPLE_SIZE}")

def load_data(file_path: str, n: int) -> List[Dict]:
    """Load up to ``n`` question/answer samples from a TSV file.

    Scans at most ``2 * n`` rows, keeping rows whose 'question' is longer
    than 5 characters and whose 'answers' is longer than 10 characters.
    Returns [] on any read/parse failure.
    """
    try:
        frame = pd.read_csv(file_path, sep='\t', nrows=n * 2)
        collected: List[Dict] = []
        for _, record in frame.iterrows():
            question = str(record.get('question', '')).strip()
            answer = str(record.get('answers', '')).strip()
            # Skip rows that are empty or too short to be meaningful.
            if not question or not answer or len(question) <= 5 or len(answer) <= 10:
                continue
            collected.append({
                'id': str(record.get('id', f'q_{len(collected)}')),
                'question': question,
                'reference': answer,
            })
            if len(collected) >= n:
                break
        logger.info(f"Loaded {len(collected)} valid samples.")
        return collected
    except Exception as exc:
        logger.error(f"Data loading failed: {exc}")
        return []

class PromptGenerator:
    """Generate system/user prompts for Qwen."""

    @staticmethod
    def generate_system_prompt(excluded_rule: Optional[int] = None, use_all: bool = True) -> str:
        """Build the system prompt for one experiment condition.

        Args:
            excluded_rule: rule id to drop for a single-rule ablation, or None.
            use_all: when False, emit a prompt with NO rules at all (the
                'no_rules' condition). Bug fix: this flag was previously
                ignored, making 'no_rules' identical to the baseline.

        Returns:
            The system prompt string.
        """
        if use_all:
            active_rules = [f"{rule_id}. {text}"
                            for rule_id, text in config.AUTISM_RULES.items()
                            if excluded_rule is None or rule_id != excluded_rule]
        else:
            active_rules = []
        prompt = "You are an expert assistant specialized in creating autism-friendly explanations."
        if active_rules:
            # Baseline/ablation path: byte-identical to the original prompt.
            prompt += "\n\nAUTISM-FRIENDLY REQUIREMENTS:\n" + "\n".join(active_rules)
        prompt += "\n\nGoal: Make information simple and accessible for people with autism."
        return prompt

    @staticmethod
    def generate_user_prompt(question: str) -> str:
        """Wrap ``question`` in the fixed answer-elicitation template."""
        return f"Question: {question}\nCreate an autism-friendly answer following the rules above.\nAnswer:"

class FastEvaluator:
    """Evaluator focusing on key metrics: ROUGE-L, BERTScore F1, and three
    readability scores (Flesch, Dale-Chall, Gunning Fog)."""

    # Scores returned when the generation is empty or scoring raises.
    # Dale-Chall/Gunning Fog use pessimistic (high = hard) fallbacks.
    _FALLBACK = {'rouge_l_f1': 0.0, 'bert_score_f1': 0.0,
                 'flesch_readability_ease': 0.0,
                 'dale_chall_score': 10.0, 'gunning_fog_index': 15.0}

    def __init__(self):
        self.rouge_scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)

    def evaluate(self, generated: str, reference: str) -> Dict[str, float]:
        """Evaluate ``generated`` against ``reference``.

        Returns a dict keyed by metric name; on any failure the fallback
        scores are returned instead of raising.
        """
        if not generated.strip():
            return dict(self._FALLBACK)
        try:
            rouge_f1 = self.rouge_scorer.score(reference, generated)['rougeL'].fmeasure
            if BERT_AVAILABLE:
                # Bug fix: bert_score.score returns (P, R, F1) tensors; the
                # previous code stored the whole tuple, which broke np.mean
                # aggregation and JSON serialization. Take the mean F1.
                _, _, f1_tensor = compute_bert_score([generated], [reference], lang='en')
                bert_f1 = float(f1_tensor.mean().item())
            else:
                bert_f1 = 0.0
            return {'rouge_l_f1': rouge_f1,
                    'bert_score_f1': bert_f1,
                    'flesch_readability_ease': textstat.flesch_reading_ease(generated),
                    'dale_chall_score': textstat.dale_chall_readability_score(generated),
                    'gunning_fog_index': textstat.gunning_fog(generated)}
        except Exception as e:
            logger.error(f"Evaluation error: {e}")
            # Bug fix: the old fallback used the key 'flesch_reading_ease',
            # inconsistent with the success path's 'flesch_readability_ease'.
            return dict(self._FALLBACK)


class FastQwenClient:
    """Thin wrapper around the OpenAI-compatible Qwen chat endpoint."""

    def __init__(self):
        # DashScope exposes an OpenAI-compatible API surface.
        self.client = OpenAI(api_key=config.API_KEY, base_url=config.BASE_URL)

    def generate_answer(self, question: str, system_prompt: str, user_prompt: str) -> str:
        """Request one chat completion; return "" on any API failure.

        NOTE(review): ``question`` is not used here — the user prompt
        already embeds it (see PromptGenerator.generate_user_prompt).
        """
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
        try:
            reply = self.client.chat.completions.create(
                model=config.MODEL_NAME,
                messages=messages,
                temperature=0.1,
                max_tokens=150,
                timeout=30,
            )
            return reply.choices[0].message.content.strip()
        except Exception as exc:
            logger.error(f"Generation failed: {exc}")
            return ""

class AblationEngine:
    """Drive the full set of ablation conditions and collect averaged scores."""

    def __init__(self):
        self.client = FastQwenClient()
        self.evaluator = FastEvaluator()
        self.results = {}

    def run_ablation_study(self, data_samples: List[Dict]) -> Dict:
        """Run baseline, no-rules, and leave-one-out conditions.

        For each condition, generates an answer per sample, scores it, and
        stores per-metric means in ``self.results`` keyed by condition name.
        """
        logger.info(f"Starting ablation study - {len(data_samples)} samples")
        # Each tuple: (name, description, excluded_rule, use_all).
        conditions = [('baseline', 'All 9 Rules', None, True),
                      ('no_rules', 'No Rules', None, False)]
        for rid in config.AUTISM_RULES:
            conditions.append((f'remove_rule_{rid}', f'Remove Rule {rid}', rid, True))

        for name, description, excluded, use_all in tqdm(conditions, desc="Running experiments"):
            logger.info(f"Experiment: {description}")
            sys_prompt = PromptGenerator.generate_system_prompt(excluded, use_all)

            per_sample = []
            for sample in data_samples[:config.SAMPLE_SIZE]:
                usr_prompt = PromptGenerator.generate_user_prompt(sample['question'])
                reply = self.client.generate_answer(sample['question'], sys_prompt, usr_prompt)
                if not reply:
                    continue
                per_sample.append(self.evaluator.evaluate(reply, sample['reference']))

            if not per_sample:
                logger.warning(f"❌ Experiment failed: {description}")
                continue
            averaged = {metric: np.mean([s[metric] for s in per_sample])
                        for metric in per_sample[0]}
            self.results[name] = {'description': description,
                                  'excluded_rule': excluded,
                                  'scores': averaged,
                                  'sample_count': len(per_sample)}
            logger.info(f"✅ Completed: {len(per_sample)} samples")

        return self.results


class AblationVisualizer:
    """Generate visualizations for ablation experiment results."""

    @staticmethod
    def create_ablation_plots(results: Dict, output_dir: str):
        """Save one bar chart per metric comparing all experiment conditions."""
        metric_names = ['rouge_l_f1', 'bert_score_f1', 'flesch_readability_ease',
                        'dale_chall_score', 'gunning_fog_index']
        chart_titles = ['Relevance Metrics Comparison',
                        'BERT Score (Higher = Better)',
                        'Flesch Readability Score (Higher = Easier)',
                        'Dale-Chall Score (Lower = Easier)',
                        'Gunning Fog Index (Lower = Easier)']

        condition_labels = list(results.keys())
        # One column of values per metric, ordered like `condition_labels`.
        columns = {m: [results[name]['scores'].get(m, 0) for name in condition_labels]
                   for m in metric_names}

        for metric, title in zip(metric_names, chart_titles):
            plt.figure(figsize=(10, 6))
            plt.bar(condition_labels, columns[metric], color='lightblue', alpha=0.7)
            plt.title(title)
            plt.ylabel(metric)
            plt.xticks(rotation=45, ha='right')
            plt.tight_layout()

            out_path = os.path.join(
                output_dir,
                f'ablation_study_{metric}_{datetime.now().strftime("%Y%m%d_%H%M%S")}.png')
            plt.savefig(out_path, dpi=300, bbox_inches='tight')
            plt.close()
            logger.info(f"📊 {title} chart saved: {out_path}")
class ResultAnalyzer:
    """Tool for analyzing ablation results."""

    @staticmethod
    def analyze_results(results: Dict) -> Dict:
        """Analyze ablation results against the baseline condition.

        Returns {} when no baseline is present. Otherwise computes a
        per-rule average relative impact (positive = removing the rule
        *improved* the averaged metrics), a ranking by absolute impact,
        and high-level insights versus the 'no_rules' condition.
        """
        if 'baseline' not in results:
            return {}

        baseline_scores = results['baseline']['scores']
        analysis = {
            'baseline_performance': baseline_scores,
            'rule_importance_ranking': [],
            'best_single_rule_removal': None,
            'worst_single_rule_removal': None,
            'overall_insights': []
        }

        rule_impacts = []
        for rule_id in config.AUTISM_RULES.keys():
            exp_name = f'remove_rule_{rule_id}'
            if exp_name not in results:
                continue
            rule_scores = results[exp_name]['scores']
            total_impact = 0.0
            metric_count = 0
            for metric, baseline_val in baseline_scores.items():
                if baseline_val <= 0:
                    continue  # avoid division by zero / meaningless ratios
                # Robustness: tolerate a metric missing from this condition.
                current_val = rule_scores.get(metric, 0.0)
                if metric in ('dale_chall_score', 'gunning_fog_index'):
                    # Lower is better for these two, so invert the sign.
                    impact = (baseline_val - current_val) / baseline_val
                else:
                    impact = (current_val - baseline_val) / baseline_val
                total_impact += impact
                metric_count += 1

            avg_impact = total_impact / metric_count if metric_count > 0 else 0
            rule_impacts.append({'rule_id': rule_id,
                                 'rule_text': config.AUTISM_RULES[rule_id][:50],
                                 'avg_impact': avg_impact})

        rule_impacts.sort(key=lambda x: abs(x['avg_impact']), reverse=True)
        analysis['rule_importance_ranking'] = rule_impacts
        # 'best' removal = highest avg_impact (metrics improved most when removed).
        analysis['best_single_rule_removal'] = min(rule_impacts, key=lambda x: -x['avg_impact'], default=None)
        analysis['worst_single_rule_removal'] = max(rule_impacts, key=lambda x: -x['avg_impact'], default=None)

        insights = []
        if 'no_rules' in results:
            no_rules_scores = results['no_rules']['scores']

            # Log both score dicts for a quick sanity check.
            logger.info(f"Baseline Scores: {baseline_scores}")
            logger.info(f"No Rules Scores: {no_rules_scores}")

            if baseline_scores['rouge_l_f1'] > no_rules_scores.get('rouge_l_f1', 0.0):
                insights.append("Using all rules significantly improves relevance compared to no rules.")

            # Bug fix: the evaluator emits 'flesch_readability_ease'; the old
            # key 'flesch_reading_ease' never existed, so both values always
            # defaulted to 0.0 and this insight could never fire.
            baseline_flesch = baseline_scores.get('flesch_readability_ease', 0.0)
            no_rules_flesch = no_rules_scores.get('flesch_readability_ease', 0.0)
            if baseline_flesch > no_rules_flesch:
                insights.append("Using all rules significantly enhances readability compared to no rules.")
        analysis['overall_insights'] = insights

        return analysis
# Convert results to native types before saving to JSON
def convert_to_float(val):
    """Recursively convert numpy scalars in ``val`` to native Python types.

    Generalized from float32/float64 only: any numpy scalar (np.int64,
    np.bool_, ...) is unboxed via ``.item()``, and tuples are converted to
    lists alongside lists and dicts, so the result is JSON-serializable.
    Non-numpy values pass through unchanged.
    """
    if isinstance(val, np.generic):
        # np.generic is the base of all numpy scalars; .item() yields the
        # corresponding native Python value.
        return val.item()
    if isinstance(val, (list, tuple)):
        return [convert_to_float(i) for i in val]
    if isinstance(val, dict):
        return {k: convert_to_float(v) for k, v in val.items()}
    return val
def run_ablation_experiment() -> bool:
    """Run the complete ablation experiment end to end.

    Loads data, runs every condition, analyzes and plots the results, and
    writes a timestamped JSON report. Returns True on success.
    """
    logger.info("🧪 Starting Qwen Ablation Experiment System")
    started = time.time()

    data_samples = load_data(config.TSV_FILE, config.SAMPLE_SIZE)
    if not data_samples:
        logger.error("Data loading failed")
        return False

    os.makedirs(config.OUTPUT_DIR, exist_ok=True)
    results = AblationEngine().run_ablation_study(data_samples)
    if not results:
        logger.error("Ablation experiment failed")
        return False

    logger.info("📊 Analyzing experiment results...")
    analysis = ResultAnalyzer().analyze_results(results)

    logger.info("🎨 Generating visualization charts...")
    AblationVisualizer().create_ablation_plots(results, config.OUTPUT_DIR)

    # Replace numpy scalars with native types so json.dump succeeds.
    raw_results = {}
    for name, payload in results.items():
        entry = dict(payload)
        entry['scores'] = {metric: convert_to_float(value)
                           for metric, value in payload['scores'].items()}
        raw_results[name] = entry
    results_serializable = {
        'raw_results': raw_results,
        'analysis': {key: convert_to_float(val) for key, val in analysis.items()},
        'processing_time': time.time() - started,
    }
    logger.info(f"Results before serialization: {results_serializable}")

    # Save the report with a timestamped filename.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_path = os.path.join(config.OUTPUT_DIR, f'ablation_results_{stamp}.json')
    with open(report_path, 'w', encoding='utf-8') as f:
        json.dump(results_serializable, f, ensure_ascii=False, indent=2)

    return True

if __name__ == "__main__":
    # Persist logs next to the Kaggle outputs; rotate the file at 1 MB.
    logger.add("/kaggle/working/experiment.log", rotation="1 MB")
    success = run_ablation_experiment()
    logger.info("Experiment completed." if success else "Experiment failed.")