import functools
import json
import os
import re
import string
from collections import defaultdict
from operator import itemgetter, attrgetter
from string import Template
from typing import Union, Type, Callable, List, Optional, Any

from autogen import GroupChat, GroupChatManager, ConversableAgent, AssistantAgent, UserProxyAgent, Agent

from utils.configuration import load_config, set_environment, default, exists

# Load runtime settings (model name, endpoint, key) from the YAML config file.
config = load_config('./env.yaml')
# SECURITY: an API key used to be hard-coded here — a leaked secret in source
# control. Read it from the config (alongside model/base_url) or from the
# OPENAI_API_KEY environment variable instead.
# NOTE(review): assumes config['llm'] is a plain mapping (yaml dict) — confirm
# what load_config returns.
llm_config = {"config_list": [{"model": config['llm']['model'],
                               "api_key": config['llm'].get('api_key') or os.environ.get('OPENAI_API_KEY', ''),
                               'base_url': config['llm']['base_url'],
                               }]}

def run_once(func):
    """Decorator that lets *func* execute only on its first call.

    Later calls are silent no-ops returning ``None``. The guard flag is
    exposed as ``wrapper.has_run`` so callers can inspect (or reset) it.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        if not wrapper.has_run:
            print(f"running {func.__name__}")
            wrapper.has_run = True
            return func(*args, **kwargs)
        return None  # explicit: repeat calls yield nothing
    wrapper.has_run = False
    return wrapper

@run_once
def update_system_prompt(prompt: str, experts: list):
    """Overwrite the system message of every agent in *experts* with *prompt*.

    Guarded by @run_once, so any call after the first is a no-op.
    """
    for agent in experts:
        agent.update_system_message(prompt)





def chunkify(lst, n):
    """Yield consecutive tuples of *n* items from the sequence *lst*.

    The final tuple holds the remainder and may be shorter than *n*;
    an empty sequence yields nothing.

    Args:
        lst: a sliceable sequence.
        n: positive chunk size.

    Raises:
        ValueError: if ``n <= 0`` (the previous zip-transpose version
            surfaced this as a confusing ZeroDivisionError mid-iteration).
    """
    if n <= 0:
        raise ValueError(f"chunk size must be positive, got {n}")
    for start in range(0, len(lst), n):
        yield tuple(lst[start:start + n])
class CustomUserProxyAgent(AssistantAgent):
    """Placeholder agent subclass with no behaviour of its own.

    NOTE(review): despite the name, this extends AssistantAgent rather than
    UserProxyAgent — confirm which base class was intended. The previous
    pass-through ``__init__`` added nothing and was removed; construction
    is unchanged.
    """


def extract_last_expert_response(response: list[dict], index) -> dict:
    """Return the most recent user-role message from *response*, re-tagged
    as coming from expert *index*.

    Args:
        response: chat history; each item is a dict with at least
            ``'role'`` and ``'content'`` keys.
        index: expert identifier; the returned copy's role becomes
            ``'expert_{index}'``.

    Returns:
        A shallow copy of the last message whose role is ``'user'`` —
        the original message is left untouched.

    Raises:
        IndexError: if no user-role message exists (same failure mode as
            the original ``r[-1]`` on an empty list).
    """
    # Scan from the end: only the last match matters, so there is no need
    # to copy every user message the way the list-building version did.
    for msg in reversed(response):
        if msg['role'] == 'user':
            tagged = msg.copy()
            tagged['role'] = f'expert_{index}'
            return tagged
    raise IndexError('no user-role message found in chat history')


def extract_last_critic_response(response: list[dict], index) -> dict:
    """Return the most recent user-role message from *response*, re-tagged
    as coming from critic *index*.

    Args:
        response: chat history; each item is a dict with at least
            ``'role'`` and ``'content'`` keys.
        index: critic identifier; the returned copy's role becomes
            ``'critic_{index}'``.

    Returns:
        A shallow copy of the last message whose role is ``'user'`` —
        the original message is left untouched.

    Raises:
        IndexError: if no user-role message exists (same failure mode as
            the original ``r[-1]`` on an empty list).
    """
    # Reverse scan avoids copying every matching message just to keep one.
    for msg in reversed(response):
        if msg['role'] == 'user':
            tagged = msg.copy()
            tagged['role'] = f'critic_{index}'
            return tagged
    raise IndexError('no user-role message found in chat history')


def vote_for_expert(response: list[dict]) -> dict[str, str]:
    """Parse each critic message and return {critic_role: voted_expert_number}.

    Critics are prompted to answer as JSON like ``{'expert': 'expert_3', ...}``;
    malformed output falls back to a regex scan of the raw content. Both paths
    now normalise the vote to the bare number string (e.g. ``'3'``) — the old
    JSON path stored ``'expert_3'`` verbatim, which broke the callers that do
    ``vote_counter.get(str(idx))`` and ``int(...)`` on the winning key.

    Messages with no recognisable vote are skipped instead of crashing with
    IndexError on an empty findall() result.
    """
    out = {}
    for resp in response:
        content = resp['content']
        # Prefer the structured field when the critic produced valid JSON.
        try:
            voted = json.loads(content)['expert']
        except (ValueError, TypeError, KeyError):
            # ValueError covers json.JSONDecodeError; TypeError/KeyError cover
            # JSON that is not a dict or lacks the 'expert' field.
            voted = content
        match = re.search(r"expert_([0-9]+)", str(voted))
        if match:
            out[resp['role']] = match.group(1)
    return out


class EnsembleExperts:
    """Orchestrates a pool of expert agents plus critic agents through several
    cooperation strategies: iterative vote-and-revise rounds (run_ensemble),
    a round-robin group chat (run_group_cooperate), and pairwise debate
    (run_with_debate). All conversations are driven by an internal agent
    named 'user'.
    """

    # Prompt handed to each critic: the question plus the candidate answers.
    # Template.substitute() str()-ifies the answer list as-is.
    critic_prompt = Template("""
    THERE ARE ALL ANSWERS FOR QUESTION $ques. 
    $answers
    """)

    def __init__(self, experts: list[ConversableAgent], strategy: str, critic: list[ConversableAgent],
                 llm_config: dict):
        """Store the expert/critic pools and create the driver agent.

        Args:
            experts: answer-producing agents.
            strategy: strategy label (stored but never read in this class).
            critic: agents that review answers and vote for the best expert.
            llm_config: config for the internal driver agent only.
        """
        self.experts = experts
        self.strategy = strategy
        self.critic = critic
        # Driver agent posing as the "user" side of every chat below.
        self.userAgent = AssistantAgent(
            name='user',
            llm_config=llm_config
        )
        # self.userAgent.register_reply(trigger=lambda x:)

    def run_ensemble(self, question: str, max_round: int):
        """Ask every expert, let critics vote, give experts max_round - 1
        revision rounds, then return the top-voted expert's final message.
        """
        expert_response = []
        for expert in self.experts:
            # One-turn chat: each expert produces its initial answer.
            chat_res_expert = self.userAgent.initiate_chat(expert, message=question, max_turns=1)
            expert_response.append(chat_res_expert)
        # Re-tag each expert's last reply as 'expert_{idx}' (0-based enumerate).
        answer_list = [extract_last_expert_response(item.chat_history, idx) for idx, item in enumerate(expert_response)]
        for _ in range(max_round - 1):
            critic_response, vote_counter = self.do_critic(answer_list, question)
            revised_answer = []
            for idx, expert in enumerate(self.experts):
                cnt = vote_counter.get(str(idx), 0)
                # `_` is the round index; the final revision round demands the
                # "FINAL ANSWER" output format.
                if _ != max_round - 2:
                    message = f"your answer receive {cnt} vote (total vote {len(self.critic)}) You can choose to revise your answer or output the previous answer to get more vote."
                else:
                    message = (f"your answer receive {cnt} vote (total vote {len(self.critic)}) "
                               f"It's your last chance to choose whether to revise your answer or output the previous answer to get more vote."
                               f"OUTPUT FORMAT: FINAL ANSWER: YOUR ANSWER:")
                self.userAgent.send(recipient=expert, message=message, request_reply=True)
                # NOTE(review): assumes the expert's fresh reply is the last
                # entry of userAgent.chat_messages[expert] and carries role
                # 'user' — confirm against autogen's message bookkeeping.
                revised_answer.append(extract_last_expert_response([self.userAgent.chat_messages[expert][-1]], idx))
            answer_list = revised_answer.copy()
        critic_response, vote_counter = self.do_critic(answer_list, question)
        # Rank experts by vote count, highest first (ties by sort stability).
        sorted_vote = sorted(list(vote_counter.items()), key=itemgetter(1), reverse=True)
        # NOTE(review): vote keys above come from 0-based enumerate(), yet this
        # subtracts 1 as if critics voted 1-based — likely off-by-one; confirm
        # which numbering convention the critics actually emit.
        return self.userAgent.chat_messages[self.experts[int(itemgetter(0)(sorted_vote[0])) - 1]][-1]

    def run_group_cooperate(self, question: str, max_round: int = 3):
        """Put all experts in one round-robin group chat and return an
        LLM-generated summary as the cooperative answer.
        """
        EXPERT_PROMPT = (". YOU SHOULD COOPERATE WITH ME TO MAKE THE ANSWER RIGHT AND BEST. "
                         "IF ANY MISTAKE I HAVE MADE,PLS CORRECT ME. IF YOU HAVE SOME IDEAS,PLS RAISE IT.")
        # NOTE(review): this permanently appends to each expert's system
        # message; calling this method twice stacks the prompt.
        for expert in self.experts:
            expert.update_system_message(expert.system_message + EXPERT_PROMPT)
        # Each expert gets up to max_round turns in round-robin order.
        group_chat = GroupChat(
            [*self.experts], messages=[], max_round=len(self.experts) * max_round,
            speaker_selection_method="round_robin",
        )
        # NOTE(review): uses the module-level llm_config, not the one passed
        # to __init__ — confirm this is intentional.
        manager = GroupChatManager(group_chat, name='group_chat_manager',
                                   llm_config=llm_config,

                                   )
        chat_result = self.userAgent.initiate_chat(manager,
                                                   message=question, summary_method='reflection_with_llm',
                                                   summary_args={
                                                       'summary_prompt': "summary all answers provided by all experts "
                                                                         "and give the final answer to me."})

        return chat_result.summary

    def run_with_debate(self,question:str,max_round:int=4):
        """Have a pair of experts debate the question, then hand both final
        answers to the critics.

        NOTE(review): only experts[0] and experts[1] actually debate — the
        `group` pairing below is built but never consumed — and the method
        returns None (the __main__ caller prints that).
        """

        group = chunkify(self.experts,2)
        def debate(exp_1:ConversableAgent,exp_2:ConversableAgent):
            # Seed the debate with exp_1's initial answer to the question.
            self.userAgent.send(message=question,recipient=exp_1,request_reply=True)
            prompt = "you should give me your answer. Then, you should debate with me about the answer and claim that your answer is right and mine is wrong. Do not say anything unrelated to this question."
            # NOTE(review): the bare string below is a no-op expression
            # statement — it was probably meant to be concatenated onto
            # `prompt` above.
            "OUTPUT FORMAT: 1.Your claim:  \n 2. My Response \n  3.My Answer  "
            messages = exp_1.chat_messages[self.userAgent][-1]['content']
            # Both debaters get the debate prompt as their system message
            # (update_system_prompt is @run_once, so this fires only once).
            update_system_prompt(experts=[exp_1,exp_2],prompt=prompt)
            chat_result = exp_1.initiate_chat(exp_2,clear_history=True,
                                summary_method="reflection_with_llm",
                                summary_args={"summary_prompt":"Summarize chat history provided by me and you give me the final answer about the answer. Summary format : {'me':XXX ,'you':XXX} "},
                                max_turns=4,message="question: "+question+" \n my answer is: "+messages)
            # NOTE(review): the requested summary format uses single quotes,
            # which json.loads rejects — this raises unless the LLM happens to
            # emit double-quoted JSON.
            ans = json.loads(chat_result.summary).values()
            self.do_critic(answer_list=ans,question=question)

        debate(self.experts[0],self.experts[1])




    # def run_with_debate


    def do_critic(self, answer_list, question):
        """Show every critic the candidate answers and tally their votes.

        Returns:
            (critic_response, vote_counter): the raw ChatResult objects and a
            defaultdict mapping voted expert id -> vote count.
        """
        critic_response = []
        for critic_expert in self.critic:
            # One-turn chat: each critic reviews the answers independently.
            chat_res = self.userAgent.initiate_chat(critic_expert,
                                                    message=self.critic_prompt.substitute(ques=question,
                                                                                          answers=answer_list),
                                                    max_turns=1)
            critic_response.append(chat_res)
        # Re-tag each critic's last reply as 'critic_{idx}'.
        critic_list = [extract_last_critic_response(item.chat_history, idx) for idx, item in enumerate(critic_response)]
        votes = vote_for_expert(critic_list)
        vote_counter = defaultdict(int)
        for critic_idx, vote_for in votes.items():
            vote_counter[vote_for] += 1
        return critic_response, vote_counter


if __name__ == '__main__':
    # Six expert configs with increasing temperature (0/7 .. 5/7) so the
    # ensemble answers with varying creativity. Dedicated names are used
    # instead of rebinding the module-level `llm_config` in the loops — the
    # original loop variable shadowed it, leaving the global pointing at the
    # last critic copy by the time it was passed to EnsembleExperts.
    # (Dict unpacking is a shallow copy, like .copy(): config_list is shared.)
    expert_configs = [{**llm_config, 'temperature': i / 7} for i in range(6)]
    experts = [AssistantAgent(
        name=f"assistant_{i}", llm_config=expert_configs[i],
        system_message="You are a code assistant，help me write some code ONlY OUTPUT the code! DONT OUTPUT ANYTHING ELSE",
    ) for i in range(6)]
    # Three critic configs with temperatures 0, 0.25, 0.5.
    critic_configs = [{**llm_config, 'temperature': i / 4} for i in range(3)]
    critic = [AssistantAgent(
        name=f"expert_{i}", llm_config=critic_configs[i],
        system_message="YOU ARE A EXPERT OF CODING,YOU SHOULD CHOOSE A BEST ANSWER"
                       "IN A ANSWER LIST I GIVE TO YOU ACCORDING TO YOUR KNOWLEDGE. describe your review for each answer using  {answer_number:reviews} to help it improve"
                       "OUTPUT FORMAT: {'expert':'expert_{expert's number}','reviews':[{'expert_{expert's number}':review_for_answer1},....,]},'why':'tel us why you choose the best answer for this expert'"
    ) for i in range(3)]
    # The driver agent now receives the clean base config (previously it got
    # whatever temperature the shadowing loop last assigned).
    group_experts = EnsembleExperts(experts, critic=critic, llm_config=llm_config, strategy='')
    print(group_experts.run_with_debate(
        "Given two sorted arrays nums1 and nums2 of sizes m and n respectively, please find and return the median of these two sorted arrays. The algorithm's time complexity should be O(log (m+n)).",
        max_round=4))
