from common.algo_base import AlgoBase
from common.log import log_method_io
from conf.config import settings
import difflib
import asyncio
import json
import os
import datetime

class TextTransfer(AlgoBase):
    """Polish (rewrite) text via LLM calls.

    Pipeline: split the input into token-bounded chunks, rewrite each chunk
    concurrently, re-polish chunks that stayed too similar to the original,
    join the results, and persist the run as a timestamped JSON file.
    """

    # Logical prompt names -> configured prompt identifiers.
    prompts_name = {
        'prompt_text_transfer': 'prompt_text_transfer',
        'prompt_merge_transfered_text': 'prompt_merge_transfered_text'
    }

    def __init__(self):
        super().__init__()
        # Token budgets for a single LLM request; input_segment() splits the
        # source text so each chunk fits max_input_tokens.
        self.max_input_tokens = 2048
        self.max_output_tokens = 2048

    @log_method_io
    async def llm_generate_transfer_text(self, text: str, keywords: str = None):
        """Ask the LLM to rewrite ``text``, optionally guided by ``keywords``.

        Returns:
            str: the rewritten text.
        """
        output_format = self.prompt_text_transfer.output_format
        messages = self.prompt.slot_replacement(
            prompt=self.prompt_text_transfer.messages,
            input=text,
            keywords=keywords,
            output_basemodel=output_format["output_basemodel"],
            output_description=output_format["output_description"]
        )
        result = self.llm_client.llm_request_json_chat(
            messages=messages,
            output_basemodel=output_format["output_basemodel"],
            max_tokens=self.max_output_tokens
        )
        return result["transfered_text"]

    @log_method_io
    def calculate_similarity(self, text: str, transfered_text: str):
        """Return a 0..1 similarity ratio between original and rewritten text."""
        return difflib.SequenceMatcher(None, text, transfered_text).ratio()

    @log_method_io
    def llm_merge_transfered_text(self, text1: str, text2: str, keywords: str = None):
        """Merge two rewritten fragments into one coherent text via the LLM.

        Returns:
            str: the merged text.
        """
        output_format = self.prompt_merge_transfered_text.output_format
        # BUGFIX: previously the prompt *name* string was passed here instead
        # of the prompt messages, unlike every sibling slot_replacement call
        # (cf. llm_generate_transfer_text, which passes ``.messages``).
        messages = self.prompt.slot_replacement(
            prompt=self.prompt_merge_transfered_text.messages,
            raw_text=text1,
            input=text2,
            keywords=keywords,
            output_basemodel=output_format["output_basemodel"],
            output_description=output_format["output_description"]
        )
        result = self.llm_client.llm_request_json_chat(
            messages=messages,
            output_basemodel=output_format["output_basemodel"],
            max_tokens=self.max_output_tokens
        )
        return result["merged_text"]

    @log_method_io
    def merge_transfered_text(self, transfered_text_list: list, keywords: str = None):
        """Fold a list of rewritten fragments into a single text, left to right."""
        if not transfered_text_list:
            return ""

        result = transfered_text_list[0]
        for item in transfered_text_list[1:]:
            result = self.llm_merge_transfered_text(result, item, keywords)
        return result

    async def concurrent_transfer(self, text_list: list, keywords: str = None):
        """Rewrite every chunk concurrently, bounded by the client semaphore.

        asyncio.gather preserves the order of the awaitables passed to it, so
        the returned list is aligned with ``text_list``.
        """
        if not text_list:
            return []

        semaphore = asyncio.Semaphore(self.llm_client.semaphore)

        async def bounded(chunk):
            # Limit in-flight LLM requests to the client's configured cap.
            async with semaphore:
                return await self.llm_generate_transfer_text(chunk, keywords)

        return await asyncio.gather(*(bounded(chunk) for chunk in text_list))

    @log_method_io
    async def concurrent_verify_transfered_text(self, text_list: list, transfered_text_list: list, keywords: str = None):
        """Verify/re-polish each (original, rewritten) pair concurrently.

        Results keep list order (asyncio.gather is order-preserving). Pairs
        are formed with zip, so a length mismatch truncates instead of
        raising IndexError.
        """
        if not text_list or not transfered_text_list:
            return []

        semaphore = asyncio.Semaphore(self.llm_client.semaphore)

        async def bounded(original, rewritten):
            async with semaphore:
                return await self.llm_verify_transfered_text(original, rewritten, keywords)

        tasks = [bounded(original, rewritten)
                 for original, rewritten in zip(text_list, transfered_text_list)]
        return await asyncio.gather(*tasks)

    @log_method_io
    async def llm_verify_transfered_text(self, text: str, transfered_text: str, keywords: str = None):
        """Re-polish ``transfered_text`` until it differs enough from ``text``.

        A similarity >= 0.5 means the rewrite stayed too close to the
        original; ask the LLM to rewrite again, up to 3 attempts.

        Returns:
            tuple: (transfered_text, similarity).
        """
        similarity = self.calculate_similarity(text, transfered_text)

        max_retry = 3
        for _ in range(max_retry):
            if similarity < 0.5:
                # Sufficiently different from the original — accept.
                break

            output_format = self.prompt_text_transfer.output_format
            content_messages = self.prompt.slot_replacement(
                prompt=self.prompt_text_transfer.messages,
                input=text,
                keywords=keywords,
                output_basemodel=output_format["output_basemodel"],
                output_description=output_format["output_description"]
            )
            fix_message = "你输出的文本相似度过高，请重新润色"

            result = self.llm_client.retry_chat_as_json(
                content_messages=content_messages,
                system_messages=transfered_text,
                fix_messages=fix_message,
                output_basemodel=output_format["output_basemodel"],
                max_retry=1,
                temperature=1
            )
            transfered_text = result["transfered_text"]
            similarity = self.calculate_similarity(text, transfered_text)

        return transfered_text, similarity

    def save_json(self, text, keywords, transfered_text, similarity):
        """Persist the run's input, output and similarity score as a JSON
        file named by the current timestamp under the configured output dir.
        """
        timestamp = datetime.datetime.now().replace(microsecond=0).strftime('%Y-%m-%d-%H-%M-%S')
        file_path = os.path.join(settings.FILE_CONFIG['OUTPUT_DIR'], timestamp + "-text_transfer.json")

        # Ensure the output directory exists before writing.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        data = {
            "input": text,
            "keywords": keywords,
            "transfered_text": transfered_text,
            "similarity": similarity
        }
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False)

    def normalize_result(self, result):
        """Collapse (text, similarity) pairs into one text and a mean score.

        Args:
            result: list of (transfered_text, similarity) tuples.
        Returns:
            tuple: (newline-joined rewritten text, average similarity).
            Empty input yields ("", 0.0) instead of ZeroDivisionError.
        """
        if not result:
            return "", 0.0

        texts = [item[0] for item in result]
        similarities = [item[1] for item in result]

        return "\n".join(texts), sum(similarities) / len(similarities)

    async def execute(self, text: str, keywords: str = None):
        """Run the full polishing pipeline.

        Args:
            text: text to polish.
            keywords: optional style keywords guiding the rewrite.

        Returns:
            tuple: (polished text, average similarity to the original).
        """
        # Split the input into chunks that fit the model's input budget.
        text_list = self.input_segment(text, self.max_input_tokens)

        # Rewrite all chunks concurrently.
        transfered_text_list = await self.concurrent_transfer(text_list, keywords)

        # Re-polish chunks that stayed too similar to their originals.
        result = await self.concurrent_verify_transfered_text(text_list, transfered_text_list, keywords)

        # Join chunk results and average their similarity scores.
        transfered_text, similarity = self.normalize_result(result)

        # Persist the run as JSON.
        self.save_json(text, keywords, transfered_text, similarity)

        return transfered_text, similarity

    def apply(self, *args, **kwargs):
        """Synchronous entry point.

        NOTE(review): delegates to ``self.run``, which is not defined in this
        class — presumably provided by AlgoBase and wrapping execute();
        confirm against the base class.
        """
        return asyncio.run(self.run(*args, **kwargs))

if __name__ == "__main__":
    text_transfer = TextTransfer()
    text_transfer.apply(
        text="""""",
        keywords="诙谐幽默的语气，严谨的知识体系"
    )