import re
import numpy as np
from tqdm import tqdm
from abc import ABC, abstractmethod
import utils
import requests
import concurrent.futures
from sklearn.metrics import accuracy_score, f1_score, mean_absolute_error
from sklearn.metrics import cohen_kappa_score


def quadratic_weighted_kappa(y_true, y_pred):
    """Compute the quadratic weighted kappa (QWK) between two label sequences.

    Args:
        y_true: ground-truth labels.
        y_pred: predicted labels.

    Returns:
        QWK score in [-1, 1]; 1 means perfect agreement.
    """
    qwk = cohen_kappa_score(y_true, y_pred, weights='quadratic')
    return qwk


class PromptOptimizer(ABC):
    """Abstract base class for prompt optimizers.

    Holds the shared run state (option namespace, task description, worker
    pool size); concrete subclasses supply ``evaluate`` and ``optimize``.
    """

    def __init__(self, args, task, max_threads=1):
        # Keep the raw options object under ``opt`` for subclass access.
        self.opt, self.task, self.max_threads = args, task, max_threads

    @abstractmethod
    def evaluate(self, prompt, examples, n=100):
        """Score ``prompt`` on up to ``n`` of ``examples``."""

    @abstractmethod
    def optimize(self, train_examples, val_examples):
        """Search for an improved prompt using the given train/val examples."""


class ProTeGi(PromptOptimizer):
    """ ProTeGi: Prompt Optimization with Textual Gradients

    Analogy used throughout: the prompt (a set of sequential "steps") is the
    network, misclassified examples are the loss, LLM critiques are textual
    gradients, and whether a revised prompt fixes an example acts as the
    learning-rate signal.  All LLM calls go through ``utils.chatgpt``, which
    is assumed to return a list of completion strings -- TODO confirm against
    the utils module.
    """
    def _sample_error_str(self, dataset, texts, labels, preds):
        """ Sample error strings from the given texts, labels, and preds

        Builds one "## Example" block (text, gold label name, predicted label
        name) for every position where label != prediction.
        ``dataset.categories`` is assumed to map a label id to a display
        name -- TODO confirm.

        Returns:
            list[str]: one formatted error description per wrong prediction.
        """
        error_idxs = []
        for i, (l, p) in enumerate(zip(labels, preds)):
            if l != p:
                error_idxs.append(i)

        error_texts = [texts[i] for i in error_idxs]
        error_labels = [labels[i] for i in error_idxs]
        error_preds = [preds[i] for i in error_idxs]
        error_strings = []
        for t, l, p in zip(error_texts, error_labels, error_preds):
            error_string = f'## Example\n'
            error_string += f'Text: \"{t.strip()}\"\nLabel: {dataset.categories[l]}\nPrediction: {dataset.categories[p]}'
            error_strings.append(error_string)
        return error_strings

    def parse_tagged_text(self, text, start_tag, end_tag):
        """ Parse text that is tagged with start and end tags.

        Scans left to right and returns the stripped substrings found between
        each ``start_tag``/``end_tag`` pair; an unmatched tag ends the scan.
        """
        texts = []
        while True:
            start_index = text.find(start_tag)
            if start_index == -1:
                break
            end_index = text.find(end_tag, start_index)
            if end_index == -1:
                break
            start_index += len(start_tag)
            texts.append(text[start_index:end_index].strip())
            text = text[end_index+len(end_tag):]
        return texts

    def init_net(self, n=1):
        """ Get initial "neural network" based on the task.

        Asks the LLM for a first draft of sequential steps; ``n`` completions
        are requested but only the first is kept.
        """
        initialization_prompt = f"""
        I have a task: "{self.task}"
        I'm trying to write a set of sequential steps outlining the process I would follow to complete the task.They are:
        """
        # Remove the indentation introduced by the triple-quoted literal.
        initialization_prompt = '\n'.join([line.lstrip() for line in initialization_prompt.split('\n')])
        init_steps = utils.chatgpt(initialization_prompt, n=n)[0]
        return init_steps

    def prediction(self, steps, ex):
        """ Predict a text.

        Args:
            steps: the current prompt (sequential steps) to execute.
            ex: example dict with at least a 'text' key.

        Returns:
            tuple: (ex unchanged, raw completion string expected to start
            with Yes/No).
        """
        pred_prompt = f"""
        # Task
        { self.task }

        # Steps to Execute the Task
        { steps }

        # Output format
        Answer Yes or No as labels

        # Prediction
        Text: { ex['text'] }
        Label:
        """
        pred_prompt = '\n'.join([line.lstrip() for line in pred_prompt.split('\n')])
        pred = utils.chatgpt(pred_prompt, n=1, temperature=self.opt['temperature'])[0]
        return ex, pred

    def evaluate(self, steps, exs, n=100):
        """ Predict some texts and evaluate Predicted results.

        Runs ``prediction`` over the first ``n`` examples in a process pool
        and computes micro-F1 against the gold labels.

        Returns:
            tuple: (f1, texts, labels, preds) -- the three lists are aligned
            in completion order, not input order.

        NOTE(review): the while/except loop retries the whole batch forever
        on pool or SSL failures; consider a retry cap.
        """
        while True:
            try:
                labels = []
                preds = []
                texts = []
                # Bound methods are pickled to worker processes here, so the
                # instance (and self.opt) must be picklable.
                with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_threads) as executor:
                    futures = [executor.submit(self.prediction, steps, ex) for ex in exs[:n]]
                    # NOTE(review): 'runing' is a typo in the progress label.
                    for i, future in tqdm(enumerate(concurrent.futures.as_completed(futures)), total=len(futures),
                                          desc='runing prediction'):
                        ex, pred = future.result()
                        # Any completion not starting with "YES" counts as 0.
                        pred = 1 if pred.strip().upper().startswith('YES') else 0
                        texts.append(ex['text'])
                        labels.append(ex['label'])
                        preds.append(pred)

                # accuracy = accuracy_score(labels, preds)
                f1 = f1_score(labels, preds, average='micro')
                break
            except (concurrent.futures.process.BrokenProcessPool, requests.exceptions.SSLError):
                pass
        return f1, texts, labels, preds


    def _cal_loss(self, step, error_string, num_reasons=1, n=1):
        """ Get "loss" for a detailed set of sequential steps based on a error string.

        Asks the LLM for ``num_reasons`` reasons why the steps misclassified
        the example, each wrapped in <START>/<END> tags.

        Returns:
            tuple: (error_string unchanged, list of parsed reason strings).
        """
        loss_prompt = f"""
        I have a task: "{self.task}"

        I'm  writing a set of sequential steps outlining the process I would follow to complete the task. 
        My current steps are: 
        {step}

        But these steps get the following example wrong:
        {error_string}

        give {num_reasons} reasons why these steps could have gotten the example wrong.
        Wrap each reason with <START> and <END>
        """
        loss_prompt = '\n'.join([line.lstrip() for line in loss_prompt.split('\n')])
        res = utils.chatgpt(loss_prompt, n=n)
        wrong_reason = []
        for r in res:
            wrong_reason += self.parse_tagged_text(r, "<START>", "<END>")
        return error_string, wrong_reason


    def cal_loss(self, dataset, steps, texts, labels, preds):
        """Get the reason of each wrong text according to the predicted result

        Returns:
            dict: error example string -> list of failure reasons.
        """
        wrongToreason = {}
        error_texts = self._sample_error_str(dataset, texts, labels, preds)
        with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_threads) as executor:
            futures = [executor.submit(self._cal_loss, steps, error_text, self.opt['reasons_per_error'], 1) for error_text in error_texts]
            for i, future in tqdm(enumerate(concurrent.futures.as_completed(futures)), total=len(futures), desc='getting loss'):
                error_text, wrong_reason = future.result()
                wrongToreason[error_text] = wrong_reason
        return wrongToreason


    def _get_gradients(self, steps, error_string, reasons, num_suggestions=1, n=1):
        """ Get "gradients" base on a wrong reason.

        Asks the LLM for ``num_suggestions`` strategies to adjust the steps
        given one error example and one failure reason; each strategy is
        wrapped in <START>/<END> tags.

        Returns:
            tuple: (error_string unchanged, list of parsed strategies).
        """
        gradient_prompt = f"""
        I have a task: "{self.task}"

        I'm  writing a set of sequential steps outlining the process I would follow to complete the task. 
        The current steps are: 
        {steps}

        But these steps get the following example wrong:
        {error_string}

        Based on the example the problem with these steps is that {reasons}

        give {num_suggestions} strategies on how to adjust these steps in order to avoid the wrong.
        Wrap each strategy with <START> and <END>.
        """
        gradient_prompt = '\n'.join([line.lstrip() for line in gradient_prompt.split('\n')])
        res = utils.chatgpt(gradient_prompt, n=n)
        suggestions = []
        for r in res:
            suggestions += self.parse_tagged_text(r, "<START>", "<END>")
        return error_string, suggestions


    def get_gradients(self, steps, wrongToreason):
        """Collect adjustment strategies for every (error example, reason) pair.

        Returns:
            dict: error example string -> flat list of suggested strategies.
        """
        # Keep only the error example and its suggestions: they are used later
        # to check whether the modified steps help; the reasons are no longer
        # needed after this point.
        wrongTosuggestion = {}
        futures = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_threads) as executor:
            for error_text in wrongToreason.keys():
                for error_reason in wrongToreason[error_text]:
                    futures.append(executor.submit(self._get_gradients, steps, error_text, error_reason, self.opt['suggestions_per_error_reason'],1))
            for i, future in tqdm(enumerate(concurrent.futures.as_completed(futures)), total=len(futures), desc='getting gradients'):
                    error_text, suggestions = future.result()
                    if error_text in wrongTosuggestion:
                        wrongTosuggestion[error_text].extend(suggestions)
                    else:
                        wrongTosuggestion[error_text] = suggestions
        return wrongTosuggestion


    def update_net(self, steps, suggestions, n=1):
        """ Update "neural network"( a detailed set of sequential steps ) based on suggestions.

        Asks the LLM to rewrite the steps incorporating the given adjustment
        strategies; only the first of ``n`` completions is kept.
        """
        update_prompt = f"""
        I have a task: "{self.task}"

        I'm  writing a set of sequential steps outlining the process I would follow to complete the task. 
        The current steps are: 
        {steps}

        But there are adjustment strategies with these steps:
        {suggestions}

        I revise these steps incorporating the adjustment strategies provided. Then I got the following steps:
        """
        update_prompt = '\n'.join([line.lstrip() for line in update_prompt.split('\n')])
        modified_steps = utils.chatgpt(update_prompt, n=n)[0]
        return modified_steps


    def cal_lr(self, steps, wrongTosuggestion):
        """Score each suggestion: 1 if applying it fixes its error example, else 0.

        The original text/label are recovered from the formatted error string
        by regex (inverse of ``_sample_error_str``'s layout), so the label here
        is the category NAME, and the raw completion is compared to it
        case-insensitively -- presumably both are Yes/No; TODO confirm.

        Returns:
            dict: suggestion string -> 0/1 "learning rate" signal.
        """
        suggestionTolr = {}
        for error_text in tqdm(wrongTosuggestion.keys(), total=len(wrongTosuggestion.keys()), desc='calculating learning rate'):
            for suggestion in wrongTosuggestion[error_text]:
                modified_steps = self.update_net(steps, suggestion)
                error_text_dict = {'text': re.search("Text: .*Label: ", error_text, re.S).group().replace('Text: ','').replace('Label: ','').strip(),
                                   'label': re.search("Label: .*Prediction: ", error_text, re.S).group().replace('Label: ','').replace('Prediction: ','').strip()}
                _, pred = self.prediction(modified_steps, error_text_dict)
                if pred.strip().upper() == error_text_dict['label'].strip().upper():
                    suggestionTolr[suggestion] = 1
                else:
                    suggestionTolr[suggestion] = 0
        return suggestionTolr

# ------------------------------------------------------------------------------------------------- From here on: ASAP essay-scoring task -------------------------------------------------------------------------------------------------
class RubricOptimizer(PromptOptimizer):
    """
    RubricOptimizer: optimizes the wording of an essay-scoring rubric so that
    a large language model can score essays more accurately (ASAP task).

    The rubric contains placeholder markers of the form ``[X1]``, ``[X2]``,
    ...; the optimizer asks the LLM why scoring failed, collects
    "replace [Xn] with ..." suggestions, applies them, and keeps whichever
    rubric variant achieves the best quadratic weighted kappa (QWK) on
    validation examples.
    """

    # Patterns for parsing one '将 [Xn] 替换为 "..."' suggestion, tried in
    # order: straight double quotes, full-width (CJK) quotes, then an
    # unquoted free-text fallback terminated by end of line / Chinese full
    # stop.  (The full-width-quote pattern fixes the previous version, where
    # the "different quotes" fallback was byte-identical to the first pattern
    # and therefore never matched anything new.)
    _REPLACEMENT_PATTERNS = (
        r'[^\w]*将\s*(\[X\d+\])\s*替换为\s*"([^"]*)"',
        r'[^\w]*将\s*(\[X\d+\])\s*替换为\s*“([^”]*)”',
        r'[^\w]*将\s*(\[X\d+\])\s*替换为\s*(.+?)(?:$|\n|。)',
    )

    def __init__(self, args, task, max_threads=1, model_name="chatgpt"):
        """
        Initialize the rubric optimizer.

        Args:
            args: optimization options (dict-like; must provide 'temperature').
            task: task description string.
            max_threads: maximum number of worker processes for evaluation.
            model_name: backend model name ("chatgpt" or "glm").

        Raises:
            ValueError: if ``model_name`` is not supported.
        """
        super().__init__(args, task, max_threads)
        self.model_name = model_name
        # Select the API wrapper matching the requested backend.
        if model_name == "chatgpt":
            self.api_call = utils.chatgpt
        elif model_name == "glm":
            self.api_call = utils.glm
        else:
            raise ValueError(f"不支持的模型: {model_name}")

    def parse_tagged_text(self, text, start_tag, end_tag):
        """Return the stripped substrings between each start/end tag pair.

        Scans left to right; an unmatched tag ends the scan.
        """
        texts = []
        while True:
            start_index = text.find(start_tag)
            if start_index == -1:
                break
            end_index = text.find(end_tag, start_index)
            if end_index == -1:
                break
            start_index += len(start_tag)
            texts.append(text[start_index:end_index].strip())
            text = text[end_index+len(end_tag):]
        return texts

    def extract_markers(self, rubric):
        """Return all ``[Xn]`` placeholder markers found in the rubric."""
        pattern = r'\[X\d+\]'
        return re.findall(pattern, rubric)

    def replace_markers(self, rubric, replacements):
        """Replace markers in the rubric.

        Args:
            rubric: rubric text containing ``[Xn]`` markers.
            replacements: dict mapping marker -> replacement text.

        Returns:
            The rubric with every occurrence of each marker substituted.
        """
        result = rubric
        for marker, value in replacements.items():
            # Escape the marker so '[', ']' are treated literally.
            pattern = re.escape(marker)
            result = re.sub(pattern, value, result)
        return result

    def prediction(self, rubric, example):
        """Score one essay with the given rubric.

        Args:
            rubric: rubric text to grade with.
            example: dict with keys 'prompt', 'essay' and
                'score_range' -> {'min_score', 'max_score'}.

        Returns:
            tuple: (example unchanged, integer score clamped to the valid
            range; the range midpoint if no number could be parsed).
        """
        pred_prompt = f"""
        # 作文评分任务
        
        ## 作文题目
        {example['prompt']}
        
        ## 评分标准
        {rubric}
        
        ## 分数范围
        {example['score_range']['min_score']} 到 {example['score_range']['max_score']}
        
        ## 评分要求
        请仔细阅读以下学生作文，根据上述评分标准进行评分。只需输出一个分数，不需要解释。
        
        ## 学生作文
        {example['essay']}
        
        ## 评分（只输出一个分数）
        """
        pred_prompt = '\n'.join([line.lstrip() for line in pred_prompt.split('\n')])
        pred_text = self.api_call(pred_prompt, n=1, temperature=self.opt['temperature'])[0]

        # Extract a score from the completion; fall back to the first digit
        # run, then to the range midpoint.
        try:
            score = int(pred_text.strip())
        except ValueError:
            score_match = re.search(r'\d+', pred_text)
            if score_match:
                score = int(score_match.group())
            else:
                min_score = example['score_range']['min_score']
                max_score = example['score_range']['max_score']
                score = (min_score + max_score) // 2

        # Clamp the score to the valid range.
        min_score = example['score_range']['min_score']
        max_score = example['score_range']['max_score']
        score = max(min(score, max_score), min_score)

        return example, score

    def evaluate(self, rubric, examples, n=100):
        """Evaluate rubric performance on up to ``n`` examples.

        Runs ``prediction`` in a process pool and computes QWK and MAE
        against the gold scores.  Result lists are in completion order.

        Returns:
            tuple: (qwk, mae, essays, true_scores, pred_scores).

        NOTE(review): retries the whole batch forever on pool/SSL failures,
        mirroring ProTeGi.evaluate; consider a retry cap.
        """
        while True:
            try:
                essays = []
                true_scores = []
                pred_scores = []

                with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_threads) as executor:
                    futures = [executor.submit(self.prediction, rubric, ex) for ex in examples[:n]]
                    for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures),
                                      desc='运行评分预测'):
                        ex, score = future.result()
                        essays.append(ex['essay'])
                        true_scores.append(ex['score'])
                        pred_scores.append(score)

                # Evaluation metrics.
                qwk = quadratic_weighted_kappa(true_scores, pred_scores)
                mae = mean_absolute_error(true_scores, pred_scores)
                break
            except (concurrent.futures.process.BrokenProcessPool, requests.exceptions.SSLError):
                pass

        return qwk, mae, essays, true_scores, pred_scores

    def find_error_examples(self, essays, true_scores, pred_scores, n_samples=5):
        """Return the ``n_samples`` examples with the largest scoring error.

        Each result dict holds a truncated essay (first 300 chars), the true
        and predicted scores, and their absolute error.
        """
        # Absolute error per example.
        errors = [abs(t - p) for t, p in zip(true_scores, pred_scores)]

        # Indices sorted by error, largest first.
        sorted_indices = sorted(range(len(errors)), key=lambda i: errors[i], reverse=True)

        # Keep the worst n_samples (fewer if there aren't that many).
        error_indices = sorted_indices[:n_samples]

        error_examples = []
        for idx in error_indices:
            error_example = {
                'essay': essays[idx][:300] + "...",  # keep only the first 300 chars
                'true_score': true_scores[idx],
                'pred_score': pred_scores[idx],
                'error': errors[idx]
            }
            error_examples.append(error_example)

        return error_examples

    def analyze_errors(self, rubric, error_examples):
        """Ask the LLM why each error example was misscored.

        Returns:
            list of dicts: {'example': error example, 'reasons': parsed
            <START>/<END>-tagged reason strings}.
        """
        error_analysis = []

        for example in tqdm(error_examples, desc="分析错误原因"):
            analysis_prompt = f"""
            # 作文评分任务
            
            ## 评分标准
            {rubric}
            
            ## 错误例子
            作文片段: "{example['essay']}"
            真实分数: {example['true_score']}
            预测分数: {example['pred_score']}
            
            ## 请分析
            为什么使用这个评分标准会导致这个评分错误？请给出3个具体原因，并用<START>和<END>标记包围每个原因。
            """

            response = self.api_call(analysis_prompt, n=1)[0]
            reasons = self.parse_tagged_text(response, "<START>", "<END>")

            error_analysis.append({
                'example': example,
                'reasons': reasons
            })

        return error_analysis

    def generate_improvements(self, rubric, error_analysis):
        """Generate rubric-marker modification suggestions from error analyses.

        Returns:
            list[str]: suggestion strings of the form
            '将[Xn]替换为"新文本"' parsed from the LLM responses.
        """
        # All markers present in the current rubric.
        markers = self.extract_markers(rubric)

        improvement_suggestions = []

        for analysis in tqdm(error_analysis, desc="生成改进建议"):
            reasons = "\n".join(analysis['reasons'])

            suggestion_prompt = f"""
            # 作文评分任务
            
            ## 当前评分标准
            {rubric}
            
            ## 可修改的标记
            {", ".join(markers)}
            
            ## 错误例子
            作文片段: "{analysis['example']['essay']}"
            真实分数: {analysis['example']['true_score']}
            预测分数: {analysis['example']['pred_score']}
            
            ## 错误原因
            {reasons}
            
            ## 请提供建议
            请提供2个具体建议，说明如何修改评分标准中的标记，以解决这些问题。每个建议应明确指出要修改哪个标记，并提供新的替换文本。
            格式：<START>将[X数字]替换为"新文本"<END>
            """

            response = self.api_call(suggestion_prompt, n=1)[0]
            suggestions = self.parse_tagged_text(response, "<START>", "<END>")

            improvement_suggestions.extend(suggestions)

        return improvement_suggestions

    def _parse_replacements(self, suggestion):
        """Parse one suggestion string into a {marker: replacement} dict.

        Tries each pattern in ``_REPLACEMENT_PATTERNS`` and stops at the
        first that matches, so quoted forms take precedence over the loose
        unquoted fallback.  Returns {} when nothing matches.
        """
        for pattern in self._REPLACEMENT_PATTERNS:
            matches = re.findall(pattern, suggestion)
            if matches:
                return dict(matches)
        return {}

    def apply_improvements(self, rubric, suggestions):
        """Apply improvement suggestions to the rubric.

        Returns:
            list[str]: one candidate rubric per individually-applied
            suggestion, plus (last) one rubric with all replacements applied
            cumulatively; empty if no suggestion could be parsed.
        """
        individual_rubrics = []
        all_replacements = {}

        for suggestion in suggestions:
            replacements = self._parse_replacements(suggestion)
            if replacements:
                # One rubric variant per suggestion...
                individual_rubrics.append(self.replace_markers(rubric, replacements))
                # ...while also accumulating for the combined variant
                # (later suggestions win on marker conflicts).
                all_replacements.update(replacements)

        if all_replacements:
            combined_rubric = self.replace_markers(rubric, all_replacements)
            individual_rubrics.append(combined_rubric)

        return individual_rubrics

    def evaluate_improvements(self, original_rubric, modified_rubrics, val_examples):
        """Evaluate the original and each modified rubric on validation data.

        Returns:
            list of dicts {'rubric', 'qwk', 'mae', 'is_original'} sorted by
            QWK, best first.
        """
        results = []

        # Baseline: the unmodified rubric.
        original_qwk, original_mae, _, _, _ = self.evaluate(original_rubric, val_examples)
        results.append({
            'rubric': original_rubric,
            'qwk': original_qwk,
            'mae': original_mae,
            'is_original': True
        })

        # Each candidate rubric.
        for rubric in tqdm(modified_rubrics, desc="评估改进"):
            qwk, mae, _, _, _ = self.evaluate(rubric, val_examples)
            results.append({
                'rubric': rubric,
                'qwk': qwk,
                'mae': mae,
                'is_original': False
            })

        # Best QWK first.
        results.sort(key=lambda x: x['qwk'], reverse=True)

        return results

    def optimize(self, train_examples, val_examples, iterations=3):
        """Iteratively improve the rubric.

        Each iteration: score a training slice, pick the worst-scored essays,
        ask the LLM for failure reasons and marker-replacement suggestions,
        apply them, and keep the best candidate if it beats the current QWK.
        Stops early when no candidate improves or no suggestion parses.

        Args:
            train_examples: training examples; the rubric is taken from
                ``train_examples[0]['scoring_rubric']``.
            val_examples: validation examples (first 10 used for selection).
            iterations: maximum number of improvement rounds.

        Returns:
            tuple: (best_rubric, qwk, mae).
        """
        # Starting rubric comes from the data itself.
        original_rubric = train_examples[0]['scoring_rubric']
        best_rubric = original_rubric

        print(f"初始评分标准:\n{original_rubric}\n")

        # Baseline evaluation on a small validation slice.
        print("评估初始评分标准...")
        qwk, mae, _, _, _ = self.evaluate(original_rubric, val_examples[:10])
        print(f"初始QWK: {qwk:.4f}, MAE: {mae:.4f}")

        for iteration in range(iterations):
            print(f"\n迭代 {iteration+1}/{iterations}")

            # Score a training slice to harvest errors.
            _, _, essays, true_scores, pred_scores = self.evaluate(best_rubric, train_examples[:20])

            # Worst-scored essays drive the next round of suggestions.
            error_examples = self.find_error_examples(essays, true_scores, pred_scores, n_samples=3)

            # Why did they fail?
            error_analysis = self.analyze_errors(best_rubric, error_examples)

            # What should change?
            suggestions = self.generate_improvements(best_rubric, error_analysis)

            # Build candidate rubrics.
            modified_rubrics = self.apply_improvements(best_rubric, suggestions)

            if not modified_rubrics:
                print("没有生成有效的改进建议，结束优化")
                break

            # Pick the best candidate (original included as baseline).
            results = self.evaluate_improvements(best_rubric, modified_rubrics, val_examples[:10])

            # Keep it only if it beats the current best QWK.
            if results[0]['qwk'] > qwk:
                best_rubric = results[0]['rubric']
                qwk = results[0]['qwk']
                mae = results[0]['mae']
                print(f"找到更好的评分标准! QWK: {qwk:.4f}, MAE: {mae:.4f}")
            else:
                print("没有找到更好的评分标准，结束优化")
                break

        print("\n优化完成!")
        print(f"最终评分标准:\n{best_rubric}")
        print(f"最终QWK: {qwk:.4f}, MAE: {mae:.4f}")

        return best_rubric, qwk, mae

