# 导入需要的库
import copy
from typing import Tuple

## 与大模型的交互
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

from openai import OpenAI
client = OpenAI()
# from zhipuai import ZhipuAI
# client = ZhipuAI()

# 常量定义
GREEN = '\033[32m' 
BLUE = '\033[34m' 
RESET = '\033[0m' 
ASK_OK = "\n========\n以上生成的内容您是否满意？如果需要调整，请直接给出修改意见。如果您满意，请直接输入“OK”。"

# Echo text to the terminal in green.
def print_green(text, end="\n"):
    """Print *text* wrapped in ANSI green escape codes."""
    print("".join((GREEN, text, RESET)), end=end)

# Prompt the user in blue and collect their reply.
def input_blue(question):
    """Show *question* in blue with a '==>' cursor line; return the user's input."""
    colored_prompt = BLUE + question + "\n==>" + RESET
    return input(colored_prompt)
# Thin wrapper around the streaming chat-completion API.
def get_stream_completion(messages, 
                          temperature = 0.95,
                          model="deepseek-reasoner"):
    """Open a streaming chat-completion request and return the chunk stream.

    Args:
        messages: chat history in OpenAI message-dict format.
        temperature: sampling temperature forwarded to the API.
        model: model identifier forwarded to the API.

    Returns:
        The streaming response iterator from the client.
    """
    request = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "stream": True,
    }
    return client.chat.completions.create(**request)

# Split the streamed text from the Zhipu reasoning model into its parts.
def split_content(content):
    """Split streamed model output into a (thinking, response) pair.

    The glm-zero-preview model emits its chain of thought after a
    "###Thinking" marker and the final answer after "###Response".

    Args:
        content: accumulated streamed text (leading/trailing whitespace
            is stripped before splitting).

    Returns:
        - both markers present -> (thinking_text, response_text)
        - only "###Thinking"   -> (thinking_text, None)
        - no markers           -> (None, stripped content)
    """
    THINKING = "###Thinking"
    RESPONSE = "###Response"
    content = content.strip()
    # str.partition splits on the FIRST occurrence only, so a repeated
    # marker later in the stream cannot break the split (the original
    # split()-based two-value unpack raised ValueError in that case).
    if THINKING in content and RESPONSE in content:
        after_thinking = content.partition(THINKING)[2]
        thinking, _, response = after_thinking.partition(RESPONSE)
        return thinking, response
    elif THINKING in content:
        return content.partition(THINKING)[2], None
    else:
        return None, content
    
# Run one full interaction with the Zhipu model, streaming output to the terminal.
def complete_interaction_Zhipu(short_term_memory):
    """Stream one glm-zero-preview reply and echo it as it arrives.

    The accumulated text is re-split on every chunk via split_content():
    while only the "###Thinking" section has arrived, deltas are echoed
    in green; once "###Response" appears, deltas are echoed in plain text.

    Args:
        short_term_memory: chat message list passed straight to the model.

    Returns:
        The text after the "###Response" marker, or None if that marker
        never appeared in the stream.
    """
    # Receive the streamed data chunk by chunk and display it.
    content = ""
    thinking = None
    response = None

    # Open the streaming generator.
    stream_response = get_stream_completion(messages = short_term_memory,
                                            model = "glm-zero-preview")
    for chunk in stream_response:
        if chunk.choices[0].delta.content:
            content += chunk.choices[0].delta.content

            thinking, response = split_content(content)
            if thinking and not response:
                print_green(chunk.choices[0].delta.content, end="")
            if response:
                # NOTE(review): the chunk in which "###Response" first
                # arrives is echoed verbatim, so marker fragments may show
                # on screen — display-only; the returned value is clean.
                print(chunk.choices[0].delta.content, end="")

    # Return only the final answer section.
    return response

# Run one full DeepSeek interaction, streaming output to the terminal.
def complete_interaction_DeepSeek(short_term_memory):   
    """Stream one DeepSeek reply; echo reasoning in green, answer in plain text.

    Args:
        short_term_memory: chat message list passed straight to the model.

    Returns:
        The final answer text (the reasoning trace is displayed but not
        returned).
    """
    reasoning_parts = []
    answer_parts = []

    # Open the streaming generator.
    stream = get_stream_completion(messages=short_term_memory,
                                   temperature=1.9,
                                   model="deepseek-reasoner")

    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.reasoning_content:
            reasoning_parts.append(delta.reasoning_content)
            print_green(delta.reasoning_content, end="")
        elif delta.content:
            answer_parts.append(delta.content)
            print(delta.content, end="")

    return "".join(answer_parts)

# Generate the novel's title, synopsis, characters, table of contents,
# and per-chapter outlines.
def generate_novel_info(long_term_memory) -> Tuple[str, str]:
    """Interactively draft the novel outline until the user approves it.

    Args:
        long_term_memory: conversation history shared across generation
            steps; the request and the approved outline are appended.

    Returns:
        (novel_info, chapter_num): the approved outline text and the
        chapter count exactly as the user typed it (a string).
    """
    novel_setting = input_blue("请输入小说设定（类型、主题、角色、背景等）：")
    chapter_num = input_blue("请输入章节数量：")

    prompt = (
        f"故事设定：{novel_setting}\n"
        f"章节数量：{chapter_num}\n"
        "请根据以上故事生成设定，"
        "生成故事的标题、简介与目录（由引子、中间各章节、大结局组成），"
        "以及引子、中间各章节、大结局各章节概述、主线、引出下一章节的伏笔等，但大结局，不需要伏笔。"
        f"中间各章节数量必须为{chapter_num}，不可增减，引子、大结局必须分别在中间章节前后。"
    )

    short_term_memory = [{"role": "user", "content": prompt}]
    long_term_memory.append({"role": "user", "content": prompt})

    novel_info = None
    while True:
        # Ask the model for (or to revise) the outline.
        novel_info = complete_interaction_DeepSeek(short_term_memory)
        short_term_memory.append({"role": "assistant", "content": novel_info})

        # Let the user approve or request changes.
        feedback = input_blue(ASK_OK)
        if feedback.upper() == "OK":
            break
        short_term_memory.append({"role": "user", "content": feedback})

    long_term_memory.append({"role": "assistant", "content": novel_info})
    return novel_info, chapter_num

# Generate the prose for a single chapter.
def generate_a_chapter(chapter_info, long_term_memory):
    """Interactively generate one chapter's text until the user approves it.

    Args:
        chapter_info: chapter label, e.g. "引子", "第3章", "大结局".
        long_term_memory: shared conversation history; the request and
            the approved chapter text are appended to it.

    Returns:
        The approved chapter text.
    """
    print_green(f"正在生成{chapter_info}内容...")

    # Draft against a private copy of the history so rejected attempts
    # never pollute the long-term memory.
    short_term_memory = copy.deepcopy(long_term_memory)
    prompt = f"请基于已经生成的小说目录及简介信息，生成章节{chapter_info}的内容，字数不超过1000字，不要重复。"
    short_term_memory.append({"role": "user", "content": prompt})
    long_term_memory.append({"role": "user", "content": prompt})

    novel_info = None
    while True:
        # Ask the model for (or to revise) the chapter text.
        novel_info = complete_interaction_DeepSeek(short_term_memory)
        short_term_memory.append({"role": "assistant", "content": novel_info})

        # Let the user approve or request changes.
        feedback = input_blue(ASK_OK)
        if feedback.upper() == "OK":
            break
        short_term_memory.append({"role": "user", "content": feedback})

    long_term_memory.append({"role": "assistant", "content": novel_info})
    return novel_info

# Generate the full novel body, chapter by chapter.
def generate_novel_content(chapter_num, long_term_memory):
    """Generate every chapter in order: prologue, numbered chapters, finale.

    Args:
        chapter_num: number of middle chapters (string or int).
        long_term_memory: shared conversation history threaded through
            each per-chapter generation call.

    Returns:
        List of chapter texts in reading order.
    """
    middle_count = int(chapter_num)
    labels = ["引子"]
    labels += [f"第{i}章" for i in range(1, middle_count + 1)]
    labels.append("大结局")
    # The comprehension evaluates left to right, so each call sees the
    # history that the previous chapters appended to long_term_memory.
    return [generate_a_chapter(label, long_term_memory) for label in labels]


# Entry point.
def main():
    """Drive the interactive novel generator end to end."""
    print("欢迎使用小说生成器！")

    # Long-term memory: the full conversation history shared by every step.
    long_term_memory = []

    # First the outline; chapter_num comes back exactly as the user typed it.
    novel_info, chapter_num = generate_novel_info(long_term_memory)

    novel_text = generate_novel_content(chapter_num, long_term_memory)
    print("==========小说正文==========")
    # Join the chapters into readable prose with a blank line between them,
    # instead of printing the list's repr as the original did.
    print("\n\n".join(novel_text))

if __name__ == "__main__":
    main()
