import requests
import json
import concurrent.futures
import os
import re
from abc import ABC, abstractmethod
from typing import List, Dict, Callable, Tuple
from tqdm import tqdm
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score, classification_report, mean_absolute_error
import numpy as np

class DataProcessor(ABC):
    """Interface for task data loaders.

    Concrete tasks supply train/test example lists and a way to render
    model predictions as human-readable strings.
    """

    def __init__(self, data_dir, max_threads=1):
        # Root directory holding the task's raw data files.
        self.data_dir = data_dir
        # Upper bound on worker threads a task may use.
        self.max_threads = max_threads

    @abstractmethod
    def get_train_examples(self):
        """Return the list of training examples."""

    @abstractmethod
    def get_test_examples(self):
        """Return the list of held-out test examples."""

    @abstractmethod
    def stringify_prediction(self, pred):
        """Render a raw prediction as a string."""


class BinaryClassificationTask(DataProcessor):
    """Base class for yes/no classification tasks.

    Subclasses may override ``categories`` to rename the two labels.
    """
    categories = ['No', 'Yes']

    def stringify_prediction(self, pred):
        """Map an integer prediction (0/1) to its category name.

        Resolves ``categories`` through ``self`` (not the hard-coded
        base-class attribute) so a subclass override is actually honored.
        """
        return self.categories[pred]


class EthosBinaryTask(BinaryClassificationTask):
    """Binary hate-speech detection on the ETHOS dataset.

    Ambiguously-scored rows (0 < score < 0.7) are dropped. The first 200
    remaining rows form the test split; the rest form the training split.
    """
    categories = ['No', 'Yes']

    def _load_examples(self):
        """Load and filter the shuffled ETHOS CSV into example dicts.

        Returns:
            List of ``{'id', 'text', 'label'}`` dicts in file order, where
            label is 1 for hate (score >= 0.7) and 0 otherwise.
        """
        df = pd.read_csv(self.data_dir + '/ethos_ishate_binary_shuf.csv', sep=';', header=None)
        # Keep only confidently-labeled rows: score <= 0 (not hate) or >= 0.7 (hate).
        df = df[(df[1] <= 0) | (df[1] >= 0.7)]
        records = df.reset_index().to_dict('records')
        # Threshold 0.4 cleanly separates the two surviving score bands.
        return [{'id': r['index'], 'text': r[0], 'label': 1 if r[1] > 0.4 else 0}
                for r in records]

    def get_train_examples(self):
        """Return every example after the first 200 (training split)."""
        return self._load_examples()[200:]

    def get_test_examples(self):
        """Return the first 200 examples (test split)."""
        return self._load_examples()[:200]


class JailbreakBinaryTask(BinaryClassificationTask):
    """Binary jailbreak detection over conversation transcripts."""
    categories = ['No', 'Yes']

    def _load_examples(self, filename):
        """Parse a TSV of (conversation-JSON, label) lines into example dicts.

        Only user turns are kept; their texts are concatenated with spaces.

        Args:
            filename: File name under ``data_dir`` (e.g. ``'train.tsv'``).

        Returns:
            List of ``{'id', 'text', 'label'}`` dicts, ids numbered by line.
        """
        exs = []
        # Context manager guarantees the handle is closed (original leaked it).
        with open(os.path.join(self.data_dir, filename)) as f:
            for i, line in enumerate(f):
                convo, label = line.strip().split('\t')
                text = ' '.join(turn['text'].strip()
                                for turn in json.loads(convo)
                                if turn['role'] == 'user')
                exs.append({'id': i, 'text': text, 'label': int(label)})
        return exs

    def get_train_examples(self):
        """Return training examples from ``train.tsv``."""
        return self._load_examples('train.tsv')

    def get_test_examples(self):
        """Return test examples from ``test.tsv``."""
        return self._load_examples('test.tsv')


class DefaultHFBinaryTask(BinaryClassificationTask):
    """Binary classification over JSONL files with ``text``/``label`` fields."""
    categories = ['No', 'Yes']

    def _load_examples(self, split):
        """Read ``{split}.jsonl`` under ``data_dir`` into example dicts.

        Args:
            split: Split name, ``'train'`` or ``'test'``; also prefixes ids.

        Returns:
            List of ``{'id', 'label', 'text'}`` dicts in file order.
        """
        exs = []
        # Context manager guarantees the handle is closed (original leaked it).
        with open(os.path.join(self.data_dir, f'{split}.jsonl')) as f:
            for i, line in enumerate(f):
                row = json.loads(line.strip())
                exs.append({'id': f'{split}-{i}', 'label': row['label'], 'text': row['text']})
        return exs

    def get_train_examples(self):
        """Return training examples from ``train.jsonl``."""
        return self._load_examples('train')

    def get_test_examples(self):
        """Return test examples from ``test.jsonl``."""
        return self._load_examples('test')

# ASAP essay scoring task starts here
class ASAPTask(DataProcessor):
    """ASAP (Automated Student Assessment Prize) essay scoring task."""

    def __init__(self, data_dir, essay_set_id, max_threads=1, train_ratio=0.7, val_ratio=0.15):
        """
        Initialize the ASAP essay-scoring task processor.

        Args:
            data_dir: Root data directory.
            essay_set_id: Essay set ID (1-8).
            max_threads: Maximum number of worker threads.
            train_ratio: Intended train fraction (stored but currently unused;
                `_split_dataset` uses fixed sizes instead).
            val_ratio: Intended validation fraction (stored but currently unused).
        """
        super().__init__(data_dir, max_threads)
        self.essay_set_id = essay_set_id
        self.train_ratio = train_ratio
        self.val_ratio = val_ratio

        # Load prompt, rubric, score range, and essays for this essay set.
        self.prompt = self._load_prompt()
        self.scoring_rubric = self._load_scoring_rubric()
        self.score_range = self._load_score_range()
        self.essays_df = self._load_essays()

        # Split essay ids into train / validation / test lists.
        self.train_ids, self.val_ids, self.test_ids = self._split_dataset()

    def _load_prompt(self):
        """Load the essay prompt text for this essay set."""
        # NOTE(review): 'Essaay' looks like a typo, but presumably matches the
        # actual filenames on disk -- confirm before changing the path.
        prompt_path = os.path.join(self.data_dir, 'ASAP', 'prompts', f'Essaay Set #{self.essay_set_id}.txt')
        with open(prompt_path, 'r', encoding='utf-8') as f:
            return f.read().strip()

    def _load_scoring_rubric(self):
        """Load the scoring rubric text for this essay set."""
        # NOTE(review): same 'Essaay' spelling as in _load_prompt -- kept as-is.
        rubric_path = os.path.join(self.data_dir, 'ASAP', 'scoring_rubric', f'Essaay Set #{self.essay_set_id}.txt')
        with open(rubric_path, 'r', encoding='utf-8') as f:
            return f.read().strip()

    def _load_score_range(self):
        """Load the min/max score bounds for this essay set from the spreadsheet."""
        score_range_path = os.path.join(self.data_dir, 'ASAP', 'score range.xlsx')
        score_df = pd.read_excel(score_range_path)
        essay_row = score_df[score_df['essay_set'] == self.essay_set_id].iloc[0]
        return {
            'min_score': essay_row['min_score'],
            'max_score': essay_row['max_score']
        }

    def _load_essays(self):
        """Load the essays belonging to this essay set."""
        essays_path = os.path.join(self.data_dir, 'ASAP', 'training_set_rel3.xlsx')
        df = pd.read_excel(essays_path)
        return df[df['essay_set'] == self.essay_set_id].copy()

    def _split_dataset(self, train_size=60, val_size=70, test_size=70, max_samples=200):
        """
        Split the essays into train, validation, and test id lists.

        Args:
            train_size: Number of training essays.
            val_size: Number of validation essays.
            test_size: Number of test essays.
            max_samples: Cap on essays taken from this essay set.

        Returns:
            Tuple of (train ids, validation ids, test ids).
        """
        essay_ids = self.essays_df['essay_id'].tolist()
        # Use a dedicated legacy RandomState: it produces exactly the same
        # shuffle as np.random.seed(42) + np.random.shuffle, but without
        # clobbering the process-wide global RNG state.
        rng = np.random.RandomState(42)
        rng.shuffle(essay_ids)

        # Cap the number of essays taken from this set.
        if len(essay_ids) > max_samples:
            essay_ids = essay_ids[:max_samples]

        # If there are too few essays, shrink each split proportionally.
        total_needed = train_size + val_size + test_size
        if len(essay_ids) < total_needed:
            print(f"警告: 作文集 {self.essay_set_id} 只有 {len(essay_ids)} 篇作文，少于请求的 {total_needed} 篇")
            ratio = len(essay_ids) / total_needed
            train_size = int(train_size * ratio)
            val_size = int(val_size * ratio)
            # Test split absorbs the rounding remainder.
            test_size = len(essay_ids) - train_size - val_size

        train_ids = essay_ids[:train_size]
        val_ids = essay_ids[train_size:train_size+val_size]
        test_ids = essay_ids[train_size+val_size:train_size+val_size+test_size]

        return train_ids, val_ids, test_ids

    def insert_optimization_markers(self, scoring_rubric):
        """
        Insert optimization markers ([X1], [X2], ...) into a scoring rubric.

        Assumes the rubric is formatted as "Score Point X-Y: Description"
        sections, each optionally followed by bullet points starting with '•'.

        Args:
            scoring_rubric: Original rubric text.

        Returns:
            Rubric text with a numbered [Xn] marker before each section
            header, description, and bullet point. If no "Score Point"
            sections are found, the whole text gets a single [X1] marker.
        """
        # Split the rubric into alternating (header, body) segments.
        score_sections = re.split(r'(Score Point \d+(?:-\d+)?:)', scoring_rubric)

        if len(score_sections) <= 1:
            # No recognizable section headers: mark the entire text once.
            return f"[X1] {scoring_rubric}"

        marked_rubric = ""
        marker_idx = 1

        # Handle any leading text before the first section header.
        if not score_sections[0].startswith("Score Point"):
            marked_rubric += f"[X{marker_idx}] {score_sections[0].strip()}\n"
            marker_idx += 1
            score_sections = score_sections[1:]

        # Process each (header, body) pair.
        for i in range(0, len(score_sections), 2):
            if i+1 < len(score_sections):
                # Marker for the section header ("Score Point X-Y:").
                score_header = score_sections[i].strip()
                marked_rubric += f"[X{marker_idx}] {score_header} "
                marker_idx += 1

                description = score_sections[i+1].strip()

                # Separate the main description from its bullet points.
                desc_parts = description.split('\n•')
                main_desc = desc_parts[0].strip()
                marked_rubric += f"[X{marker_idx}] {main_desc}\n"
                marker_idx += 1

                # Marker for each bullet point.
                if len(desc_parts) > 1:
                    for point in desc_parts[1:]:
                        marked_rubric += f"• [X{marker_idx}] {point.strip()}\n"
                        marker_idx += 1

                marked_rubric += "\n"

        return marked_rubric.strip()

    def _create_example(self, essay_row, include_rubric=True, use_marked_rubric=False):
        """
        Build one essay-scoring example dict.

        Args:
            essay_row: A row of the essays DataFrame.
            include_rubric: Whether to include the scoring rubric.
            use_marked_rubric: Whether to use the marker-annotated rubric
                (the original rubric is then kept under 'original_rubric').

        Returns:
            Dict with id, prompt, essay text, gold score, score range, and
            optionally the rubric.
        """
        essay_id = essay_row['essay_id']
        essay_text = essay_row['essay']
        score = essay_row['domain1_score']

        example = {
            'id': essay_id,
            'prompt': self.prompt,
            'essay': essay_text,
            'score': score,
            'score_range': self.score_range
        }

        if include_rubric:
            if use_marked_rubric:
                example['scoring_rubric'] = self.insert_optimization_markers(self.scoring_rubric)
                example['original_rubric'] = self.scoring_rubric
            else:
                example['scoring_rubric'] = self.scoring_rubric

        return example

    def get_train_examples(self, include_rubric=True, use_marked_rubric=True):
        """Return training-set examples."""
        train_df = self.essays_df[self.essays_df['essay_id'].isin(self.train_ids)]
        return [self._create_example(row, include_rubric, use_marked_rubric)
                for _, row in train_df.iterrows()]

    def get_val_examples(self, include_rubric=True, use_marked_rubric=False):
        """Return validation-set examples."""
        val_df = self.essays_df[self.essays_df['essay_id'].isin(self.val_ids)]
        return [self._create_example(row, include_rubric, use_marked_rubric)
                for _, row in val_df.iterrows()]

    def get_test_examples(self, include_rubric=True, use_marked_rubric=False):
        """Return test-set examples."""
        test_df = self.essays_df[self.essays_df['essay_id'].isin(self.test_ids)]
        return [self._create_example(row, include_rubric, use_marked_rubric)
                for _, row in test_df.iterrows()]

    def stringify_prediction(self, pred):
        """Render a predicted score as a string."""
        return str(pred)

    def evaluate_predictions(self, true_scores, pred_scores):
        """
        Evaluate predicted scores against gold scores.

        Args:
            true_scores: List of gold scores.
            pred_scores: List of predicted scores.

        Returns:
            Dict with MAE, Pearson correlation, exact-match accuracy, and
            within-1-point accuracy.
        """
        # Clamp predictions into the valid score range first.
        min_score = self.score_range['min_score']
        max_score = self.score_range['max_score']
        pred_scores = [max(min(p, max_score), min_score) for p in pred_scores]

        mae = mean_absolute_error(true_scores, pred_scores)

        # Pearson correlation between gold and predicted scores.
        corr = np.corrcoef(true_scores, pred_scores)[0, 1]

        # Exact-match accuracy.
        accuracy = sum(t == p for t, p in zip(true_scores, pred_scores)) / len(true_scores)

        # Accuracy within one score point.
        accuracy_1 = sum(abs(t - p) <= 1 for t, p in zip(true_scores, pred_scores)) / len(true_scores)

        return {
            'mae': mae,
            'correlation': corr,
            'exact_accuracy': accuracy,
            'accuracy_within_1': accuracy_1
        }
