
from typing import Any, Callable, Optional, Union, Sequence

from loguru import logger
from agent.flex_service_toolkit import FlexServiceToolkit
from agentscope.exception import ResponseParsingError, FunctionCallError
from agentscope.agents import AgentBase
from agentscope.memory.temporary_memory import TemporaryMemory
from agentscope.message import Msg
from agentscope.parsers import MarkdownJsonDictParser
from agentscope.service.service_toolkit import ServiceFunction
from agentscope.service import (
    ServiceToolkit,
    ServiceResponse,
    ServiceExecStatus,
)
import json


# ---------------------------------------------------------------------------
# Prompt templates (Chinese). The agent plays a deliberately critical grader
# who scores a student's learning log / dialogue stream against the chapter
# scoring standards and reports a per-standard score plus critique.
# ---------------------------------------------------------------------------
INSTRUCTION_SCORE_PROMPT_CN = """
你是一名经验丰富的算法课程评分专家，你的任务是：基于学生对一个章节的学习日志、对话流，按照指定的评分标准进行评分。
但是不同于普通老师，为了提高得分的区分度，你需要作为一名非常挑剔的专家，根据评分标准仔细挑剔学生的学习日志，并给出评分报告。

重要的挑剔点：
1. 存在错误的概念理解
2. 存在矛盾的概念理解
3. 存在不清晰、不连贯的概念表达
4. 其他你认为可以挑剔的地方

下面是学生的整个学习日志和对话流：
"""

# Hint inserted after the learning log, introducing the scoring standards.
CHAPTER_HINT_CN = """
上面就是学生的整个学习日志和对话流，请进行下面的各条章节评分标准进行挑剔与评分。
"""

# Required response format (CN): a dict of per-standard score and critique.
RESPONSE_HINT_CN = """
你的返回必须包含：
对于每条章节评分标准，报告学生的得分以及挑剔的结果。
例如：
```
{'评分标准1': {'得分': 8, '挑剔报告': '答案正确，但对于xxx概念表述有误'},
 '评分标准2': {'得分': 3, '挑剔报告': '答案错误，对于xxx概念理解完全错误，导致回答有误'}
}
```
"""

# ---------------------------------------------------------------------------
# English counterparts of the prompts above.
# ---------------------------------------------------------------------------
INSTRUCTION_SCORE_PROMPT_EN = """
You are an experienced algorithm course scoring expert, your task is to: Based on the students' learning logs and dialogues, score them according to the specified scoring standard.
But unlike a normal teacher, to improve the distinction of the score, you need to be a very critical expert, carefully pick out the students' learning logs according to the scoring standard and give a score report.

The important points to pick out:
1. There are errors in the concept understanding
2. There are contradictions in the concept understanding
3. There are unclear, incoherent concept expressions
4. Other places you think can be picked out
Below is the students' entire learning log and dialogue:
"""

# Hint inserted after the learning log, introducing the scoring standards.
CHAPTER_HINT_EN = """
Above is the students' entire learning log and dialogue, please pick out according to the following chapter scoring standard and score them.
"""

# Required response format (EN). Note the example dict is closed with "}"
# to mirror the CN version (the original template left it unbalanced).
RESPONSE_HINT_EN = """
Your response must contain:
For each chapter scoring standard, report the students' score and pick out the results.
For example:
```
{'Score standard 1': {'Score': 8, 'Pick out report': 'The answer is correct, but for the xxx concept expression is wrong'},
 'Score standard 2': {'Score': 3, 'Pick out report': 'The answer is wrong, for the xxx concept understanding is completely wrong, causing the answer to be wrong'}
}
```
"""


class ScoreAgent(AgentBase):
    """An agent that critically scores a student's learning log.

    The agent is seeded at construction time with the system instructions,
    the student's full learning log / dialogue (copied from an existing
    memory), the chapter-specific scoring standard, and the required
    response format.  Each ``reply`` call then queries the model once,
    retrying up to ``max_iters`` times on parsing or transient failures.
    """

    def __init__(
        self,
        name: str,
        model_config_name: str,
        memory: TemporaryMemory,
        chapter_score_prompt: str,
        service_toolkit: ServiceToolkit,
        sys_prompt: str = "You're a helpful assistant.",
        max_iters: int = 5,
        verbose: bool = True,
        **kwargs: Any,
    ) -> None:
        """Initialize the scoring agent.

        Args:
            name: Agent name; substituted into ``sys_prompt`` via ``{name}``.
            model_config_name: Name of the model configuration to use.
            memory: Memory holding the student's learning log / dialogue;
                its messages are copied into this agent's own memory.
            chapter_score_prompt: The chapter-specific scoring standard.
            service_toolkit: Toolkit of callable services (stored for
                interface compatibility; not used directly in this class).
            sys_prompt: Brief role intro; may contain a ``{name}`` placeholder.
            max_iters: Maximum model-call attempts per ``reply``.
            verbose: If True, echo prompts and intermediate results.
        """
        self.service_toolkit = service_toolkit
        super().__init__(
            name=name,
            sys_prompt=sys_prompt,
            model_config_name=model_config_name,
        )
        self.max_iters = max_iters
        self.verbose = verbose

        # Ensure the intro ends with a newline so the joined sections
        # below stay visually separated in the final prompt.
        if not sys_prompt.endswith("\n"):
            sys_prompt += "\n"

        self.sys_prompt = "\n".join(
            [
                # The brief intro of the role and target.
                sys_prompt.format(name=self.name),
                # The detailed instruction prompt for the agent.
                INSTRUCTION_SCORE_PROMPT_CN,
            ],
        )

        # Rebuild memory in scoring order: system instructions, the
        # student's log, the chapter standard, then the response format.
        self.memory = TemporaryMemory()
        self.memory.add(Msg("system", self.sys_prompt, role="system"))
        self.memory.add(memory.get_memory())
        self.memory.add(Msg("system", CHAPTER_HINT_CN, role="system"))
        self.memory.add(Msg("system", chapter_score_prompt, role="system"))
        self.memory.add(Msg("system", RESPONSE_HINT_CN, role="system"))

        self.parser = MarkdownJsonDictParser(
            content_hint={
                "thought": "what you thought",
                "speak": "actual response in correct format mentioned above",
            },
            required_keys=["thought", "speak"],
            # Only print the "speak" field when verbose is False.
            keys_to_content=True if self.verbose else "speak",
        )

    def reply(self, x=None, user_backboard: str = ""):
        """Query the model for a scoring report, retrying on failure.

        Args:
            x: Unused; kept for ``AgentBase`` interface compatibility.
            user_backboard: Unused; kept for backward compatibility.

        Returns:
            Msg: The parsed scoring report, or an error message after
            ``max_iters`` failed attempts.
        """
        for attempt in range(self.max_iters):
            if self.verbose:
                self.speak(f" ITER {attempt + 1} ".center(70, "#"))
            try:
                hint_msg = Msg(
                    "system",
                    self.parser.format_instruction + "\n",
                    role="system",
                    echo=self.verbose,
                )
                prompt = self.model.format(self.memory.get_memory(), hint_msg)
                if self.verbose:
                    self.speak("API Trigger".center(70, "#"))
                    self.speak(str(prompt))

                res = self.model(
                    prompt,
                    parse_func=self.parser.parse,
                    max_retries=2,
                )

                if self.verbose:
                    self.speak("Result Parsed".center(70, "#"))
                    self.speak(str(res.parsed))

                # Store the full parsed structure in memory, but return
                # only the content the parser selects for callers.
                self.memory.add(
                    Msg(
                        self.name,
                        self.parser.to_memory(res.parsed),
                        "assistant",
                    ),
                )
                return Msg(
                    self.name,
                    self.parser.to_content(res.parsed),
                    "assistant",
                )
            except ResponseParsingError as e:
                # Print out the raw model response so developers can debug,
                # then feed the error back so the model can self-correct
                # on the next iteration.
                response_msg = Msg(self.name, e.raw_response, "assistant")
                self.speak(response_msg)
                error_msg = Msg("system", str(e), "system")
                self.speak(error_msg)
                self.memory.add([response_msg, error_msg])
            except Exception as e:
                # Best-effort retry on any other failure (e.g. transient
                # API errors); keep looping until max_iters is exhausted.
                self.speak(f"Error: {e}".center(70, "#"))
                self.speak("Retrying".center(70, "#"))

        return Msg(self.name, "Error: Max iterations reached", "assistant")

