from ast import Return
import os
import logging
import re
import openai  # 添加此行以支持 APIError 捕获
from openai import OpenAI
import pathlib
import json
from traceback import format_exc
import sys
import traceback
import types
from concurrent.futures import Future, ThreadPoolExecutor


def log_uncaught_exceptions(
        exc_type: type[BaseException],
        exc_value: BaseException,
        exc_traceback: types.TracebackType):
    """Persist any uncaught exception to crash_log.txt and echo it to stderr.

    Installed below as ``sys.excepthook`` so crashes leave a diagnostic file
    even when the console scrolls away.
    """
    report = "".join(
        traceback.format_exception(exc_type, exc_value, exc_traceback))
    with open("crash_log.txt", "w") as crash_file:
        crash_file.write(report)
    # Echo to stderr as well, matching the interpreter's default behaviour.
    sys.stderr.write(report)


sys.excepthook = log_uncaught_exceptions


def clear_screen():
    """Clear the terminal: 'cls' on Windows, 'clear' on Mac/Linux."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)


class ChatManager:
    def __init__(self,
                 api_key: str = "",
                 base_url: str =
                 "https://dashscope.aliyuncs.com/compatible-mode/v1"
                 ):
        self.api_key: str = api_key
        if self.api_key == "":
            with open(pathlib.Path(__file__).parent / "auth.txt") as f:
                self.api_key = f.read().strip(" \n")
        self.base_url: str = base_url
        self.client: OpenAI = OpenAI(
            api_key=self.api_key, base_url=self.base_url)
        self.project_dir: pathlib.Path = pathlib.Path(
            __file__).parent.parent.parent.parent.parent.parent
        self.logger = logging.getLogger(__name__)
        self.logger.info(
            f"Project directory: {self.project_dir}: {'Exists' if self.project_dir.exists() else 'Not Exists'}")
        self.file_handler = logging.FileHandler('log.txt')

        self.logger.setLevel(logging.DEBUG)
        self.stream_handler = logging.StreamHandler()
        self.formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        self.file_handler.setFormatter(self.formatter)
        self.stream_handler.setFormatter(self.formatter)
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(self.stream_handler)

    def chat(self, model: str, messages: list):
        response = self.client.chat.completions.create(
            model=model, messages=messages)
        return response.choices[0].message.content if response.choices[0].message.content is not None else ""

    def chat_and_save(self, messages: list, file_path: str | pathlib.Path, model: str='qwen-coder-plus'):
        response = self.chat(model, messages)
        with open(file_path, 'w') as f:
            f.write(response)
        return response

    def wrap_message(
        self,
        message: str,
        existing_content: list[dict[str, str]] | None = None
    ) -> list[dict[str, str]]:
        """
        Wrap the message into a list of dictionaries.
        If no existing content is provided,
        a new system message will be added to the beginning of the list.
        """
        if existing_content is None:
            existing_content = [
                {
                    'role': 'system',
                    'content': 'You are a helpful assistant on software engineering documentation.' +
                               'You can help users write software engineering documentation.' +
                               'You can also refine javadoc comments which you find incomplete or have room for improvement.' +
                               'Instead of just giving suggestions, you can also give examples.'
                }
            ]
        self.logger.warning(f"wrap_message: {message}")
        self.logger.warning(f"existing_content: {existing_content}")
        self.logger.warning(format_exc())
        existing_content.append({
            'role': 'user',
            'content': message
        })
        return existing_content

    def stream_token(self, stream, messages: list):
        """
        Stream the token from the response
        """
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                yield chunk.choices[0].delta.content  # 逐段返回内容

    def stream_chat(self,
                    messages: list,
                    extra_file: str | pathlib.Path | None = None,
                    limit_length: bool = True,
                    model: str = "qwen-plus") -> str:
        """
        Stream the chat response
        write the response to a provided file if provided
        """
        self.logger.info(model)
        if (len(messages) > 3 and limit_length):
            raise ValueError(
                f"Too many messages--{len(messages)}:{[m['content'][:50]+'...' for m in messages]}")
        extra_file_path = None
        if (extra_file is not None):
            # create such file if not exists
            # later we will use it to save the response
            if isinstance(extra_file, str):
                extra_file_path = pathlib.Path(extra_file)
            elif isinstance(extra_file, pathlib.Path):
                extra_file_path = extra_file
            # we first check if the parent directory exists
            if not extra_file_path.absolute().parent.exists():
                raise FileNotFoundError(
                    f"Parent directory of {extra_file} not found")
            if not extra_file_path.exists():
                extra_file_path.touch()
        stream = self.client.chat.completions.create(
            model=model,
            messages=messages,
            stream=True  # 启用流式输出
        )
        clear_screen()
        streamed_content = ""
        try:
            for content in self.stream_token(stream=stream, messages=messages):
                print(content, end='', flush=True)
                streamed_content += content
            if extra_file_path is not None:
                with open(extra_file_path, "a") as f:
                    f.write(streamed_content)
                self.logger.warning(f"\n\nResponse saved to {extra_file_path}")
            self.logger.info(f"Full streamed response: {streamed_content}")
            return streamed_content
        except openai.APIError as e:
            self.logger.error(f"APIError occurred: {e}")
            self.logger.error(f"Request messages: {messages}")
            self.logger.error(f"Partial response: {streamed_content}")
            # raise
            return ""  # 明确返回空字符串以符合返回类型注解

    def get_java_src_dir(self):
        dir = pathlib.Path(__file__).parent.parent.parent.parent.parent.parent
        dir = dir / "src" / "main" / "java"
        dir = dir/"cn"/"edu"/"usst"/"cs"/"campusAid"
        if not dir.exists():
            raise FileNotFoundError(f"Java source directory not found: {dir}")
        return dir

    def demo(self, messages: list = [
        {
            'role': 'system',
            'content': 'You are a helpful assistant on software engineering documentation.'
        },
        {
            'role': 'user',
            'content': '你是谁？'
        }
    ]):
        self.stream_chat(messages=messages)

    def get_classes(self) -> list[str]:
        class_list_file = pathlib.Path(__file__).parent / "list.txt"
        class_list = []
        with open(class_list_file, "r") as f:
            class_list += [l for l in f.readlines()]
        return class_list

    def init_java_classes(self) -> str:
        class_list = self.get_classes()
        message_str = "我现在有一个校园互助平台项目，用spring。\n以下是我们这个项目Java类的列表：\n"
        for class_name in class_list:
            message_str += f"{class_name.strip()}\n"
        # message_str+="现在我们要写详细设计文档，对每个类要有如下要求"
        # with open(pathlib.Path(__file__).parent / "req.txt", "r") as f:
        #     message_str += f"{f.read()}\n"
        message_str += "能不能帮我们分析一下，哪些类值得写进详细设计文档？\n"
        message_str += "另外\n"
        with open(pathlib.Path(__file__).parent / "extras.txt", "r") as f:
            message_str += f"{f.read()}\n"
        # message =self.wrap_message(message=message_str)
        return message_str

    def get_question_str_for_detail(self, class_name: str) -> str:
        message_str = f"我现在想写{class_name}的介绍部分\n要求是：\n"
        with open(pathlib.Path(__file__).parent / "req_modified.txt", "r") as f:
            message_str += f"{f.read()}\n"
        # iterate the java sc folder to see the source code
        java_src_dir = self.get_java_src_dir()
        candidate_files = []
        # use rglob to find all java files
        self.logger.info(f"Searching for {class_name}.java in {java_src_dir}")
        self.logger.info(f"note: first subdirs:{os.listdir(java_src_dir)}")
        for file in java_src_dir.rglob(f"{class_name}.java"):
            candidate_files.append(file.absolute().resolve())
        # ensure unique i.e. no plural matches
        if (len(candidate_files) != 1):
            raise ValueError(
                f"Multiple files found for {class_name}:{candidate_files}")
        with open(candidate_files[0], "r") as f:
            class_code = f.read()
        message_str += "能不能帮我完成这部分的内容\n"
        message_str += "图尽量用plantUML\n实在不行用MarkDown\n"
        message_str += f"以下是{class_name}的代码：\n"
        message_str += f"{class_code}\n"
        # breakpoint()
        return message_str

    def get_all_classnames(self) -> list[str]:
        class_paths_list = self.get_classes()
        class_names_list = [class_path.split(
            "/")[-1].split(".")[0] for class_path in class_paths_list]
        return class_names_list

    def extract_classes(self, response_text: str) -> list[str]:
        """
        提取回复文本中提及中的类
        """
        response_text_lines = response_text.split("\n")
        # strip markdown notes such as - * `
        # get existing classes from the list.txt
        # this txt provides paths to .java files
        # we need to EXTRACT the class names from the paths
        # for example: src/main/java/com/example/demo/DemoController.java
        # to DemoController
        class_paths_list = self.get_classes()
        class_names_list = [class_path.split(
            "/")[-1].split(".")[0] for class_path in class_paths_list]
        mentioned_classes = []
        self.logger.warning(
            f"Loaded {len(class_names_list)} classes: {class_names_list}")
        # input(f"loaded {len(class_names_list)} classes")
        # now we have a list of class names
        # we need to extract the class names from the response text
        # but we do not know the format of the response text
        # we will use 2 loops to extract the class names
        start = False
        end = False
        for line in response_text_lines:
            # if '例' in line:
            #     break
            if not start:
                if '确认要写的类有' in line:
                    start = True
                    self.logger.warning(f"Found start line: {line}")
                    continue
                if '以上是确认要写的类' in line:
                    end = True
                    self.logger.warning(f"Found end line: {line}")
                    break
                # Find all matching class names in the line
                matches = [cls for cls in class_names_list if cls.lower()
                           in line.lower()]
                if matches:
                    # Choose the longest matching class name
                    longest_match = max(matches, key=len)
                    self.logger.warning(
                        f"Found class name (longest match): {longest_match}")
                    mentioned_classes.append(longest_match)
                    response_text_lines.remove(line)
        # now we have a list of class names
        # input(f"Extracted class names: {mentioned_classes}")
        return mentioned_classes

    def append_question_to_detail(
        self,
        existing_questions: list[dict[str, str]],
        class_name: str
    ) -> list[dict]:
        question_str = self.get_question_str_for_detail(
            class_name=class_name
        )
        question = self.wrap_message(
            message=question_str,
            existing_content=existing_questions
        )
        return question


def main(details: bool = False):
    """Entry point.

    :param details: when False, fan out ``times`` parallel "which classes
        matter" queries via process_iteration; when True, generate one
        detailed-design markdown file per class.
    :return: None on the non-detail path, "Processed all classes" otherwise.
    """
    manager = ChatManager()

    if not details:
        prepare_message_str = manager.init_java_classes()
        times = 10  # number of independent samples of the class-selection answer
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(process_iteration, i,
                                prepare_message_str, manager)
                for i in range(times)
            ]
            # Propagate any worker exception.
            for future in futures:
                future.result()

        # Fix: removed a leftover breakpoint() that froze unattended runs.

        # NOTE(review): this conversation is built but never sent anywhere
        # on this path; kept for parity with the original flow (wrap_message
        # also logs it).
        question_to_detail = manager.wrap_message(
            message="我现在有一个校园互助平台项目，用spring。\n" +
            "现在要写详细设计文档\n"+''
            + "接下来我们逐个类进行分析。\n"
            + "我会给出类的源代码和要求的格式。\n"
            + "如果你觉得这个类不值得写进详细设计文档，请直接说不需要。\n"
            + "如果你觉得这个类值得写进详细设计文档，请给出详细的介绍部分。\n"
        )

        print("Detailed design generation not asked...")
        return
    mentioned_classes = manager.get_all_classnames()

    def process_class(class_name, question_to_detail, manager: ChatManager):
        # Persist the outgoing question for audit, then write the generated
        # markdown next to this script.
        question_record_dir = pathlib.Path(__file__).parent / "questions"
        # Fix: exist_ok=True -- the original check-then-mkdir raced between
        # worker threads and could raise FileExistsError.
        question_record_dir.mkdir(parents=True, exist_ok=True)
        with open(question_record_dir / f"{class_name}.json", "w") as f:
            json.dump(question_to_detail, f)
        result_dir = pathlib.Path(__file__).parent / "details" / "md"
        result_dir.mkdir(parents=True, exist_ok=True)
        manager.chat_and_save(
            messages=question_to_detail,
            file_path=(result_dir / f"{class_name}.md"),
            model='qwen-coder-plus'
        )
        print(f"Processed {class_name}")

    with ThreadPoolExecutor() as executor:
        futures: list[Future] = []
        for class_name in mentioned_classes:
            question_to_detail_str = manager.get_question_str_for_detail(
                class_name=class_name
            )
            question_to_detail = manager.wrap_message(question_to_detail_str)
            manager.logger.info(f"Question to detail: {question_to_detail}")
            # Fix: removed the first_demo breakpoint() debug scaffold that
            # halted the first iteration of every run.
            future = executor.submit(
                process_class, class_name, question_to_detail, manager)
            futures.append(future)

        # Collect results; log failures per class instead of aborting.
        for future in futures:
            try:
                future.result()
            except Exception as e:
                manager.logger.error(f"Error processing class: {e}")

    return "Processed all classes"


def process_iteration(i: int, prepare_message_str: str, manager: ChatManager) -> list[str]:
    """One sampling round: ask which classes matter, extract the class names
    from the reply, and persist them to mentioned_classes_{i}.json.

    :param i: round index, used only to name the output file.
    :param prepare_message_str: prompt built by ChatManager.init_java_classes.
    :param manager: shared ChatManager instance.
    :return: the class names extracted from the model's reply.
    """
    # Round 1: ask which classes exist / are worth documenting.
    prepare_message = manager.wrap_message(message=prepare_message_str)

    result = manager.chat_and_save(
        model='qwen-coder-plus',
        messages=prepare_message,
        file_path=pathlib.Path(__file__).parent / "result.txt"
    )

    # Round 2: prime the conversation for the per-class analysis that follows.
    prepare_message = manager.wrap_message(
        message="好的，接下来我们逐个类进行分析。\n"
        + "我会给出类的源代码和要求的格式。\n"
        + "如果你觉得这个类不值得写进详细设计文档，请直接说不需要。\n",
        existing_content=prepare_message
    )

    mentioned_classes = manager.extract_classes(result)
    result_dir = pathlib.Path(__file__).parent / "mentioned_classes"
    # exist_ok=True: rounds run concurrently, so check-then-mkdir would race.
    result_dir.mkdir(exist_ok=True)
    # Fix: the original passed a bare open(...) to json.dump, leaking the
    # file handle and relying on GC to flush it; use a context manager.
    with open(result_dir / f"mentioned_classes_{i}.json", "w") as f:
        json.dump(mentioned_classes, f)
    manager.logger.warning(f"\n\nMentioned classes: {mentioned_classes}")
    return mentioned_classes


# Script entry point: runs the detailed-design generation path (details=True).
if __name__ == "__main__":
    main(True)
