
# sudo sh -c 'echo "export OLLAMA_HOST=0.0.0.0:11434" >> /etc/profile'
# launchctl setenv OLLAMA_HOST "0.0.0.0:11434"

import requests
import json
import sqlite3
import time

# Base URL of the Ollama API; assumes Ollama runs locally on port 11434.
BASE_URL = "http://localhost:11434"
# BASE_URL = "http://192.168.1.10:11434"

PT1 = "你是一个精通中国古诗词和AI绘画提示词的专家。请根据以下诗词信息，生成一段高质量的、适用于AI绘画工具的中文Prompt提示词。"
PT2 = "你是一个古诗分析专家，请用JSON格式输出以下内容：季节、时间、天气、主要意象、色彩基调、情感类型、艺术流派"
# qwen3:4b
# qwen3:8b
# qwen2.5:1.5b
# deepseek-r1:1.5b
# llama3.2:latest

class LLM_Ollama:
    """Client for a local Ollama server that turns classical Chinese poems
    into English AI-art prompts (for Midjourney / Stable Diffusion) and
    saves the results to JSON files under ./audios/.

    Poem records come either from a local SQLite database or from
    ./audios/poem_pinyin.json.
    """

    def __init__(self, model: str = "deepseek-r1:1.5b", host: str = BASE_URL):
        """Remember the target model/endpoint and open the poem database.

        Args:
            model: Ollama model tag to use for generation.
            host: Base URL of the Ollama server (no trailing path).
        """
        self.model = model
        self.host = f"{host}/api/generate"
        # NOTE(review): hard-coded absolute DB path — consider making this a
        # constructor parameter so the class works outside this machine.
        self.connect = sqlite3.connect('/home/ming/Desktop/data.sqlite')
        self.cursor = self.connect.cursor()

    def llm(self, prompt: str, timeout: float = 600.0):
        """Send *prompt* to the Ollama /api/generate endpoint.

        Returns the generated text on success, or a human-readable error
        message on failure — this method never raises.

        Args:
            prompt: Full prompt text sent to the model.
            timeout: Seconds to wait for the HTTP response. (Without a
                timeout the Timeout handler below could never fire.)
        """
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False  # False -> get the complete response in one shot
        }

        response = None  # kept visible so the error handlers can inspect it safely
        try:
            # Send the POST request; a timeout makes the Timeout branch reachable.
            response = requests.post(self.host, json=payload, timeout=timeout)
            response.raise_for_status()  # raise HTTPError for status codes >= 400

            # Parse the JSON response; the generated text lives in 'response'.
            response_data = response.json()
            generated_text = response_data.get("response", "未能从响应中提取文本")
            # if "</think>" in generated_text:
            #     generated_text = generated_text.split("</think>\n\n")[-1].strip()
            return generated_text
        except requests.exceptions.ConnectionError:
            return f"连接错误：无法连接到 Ollama 服务于 {self.host}。请确保 Ollama 正在运行。"
        except requests.exceptions.Timeout:
            return "请求超时。"
        except requests.exceptions.RequestException as e:
            # Any other requests-level failure, including HTTPError.
            error_message = f"请求错误: {e}"
            # Surface Ollama's own error detail when the body is JSON.
            # Guarding on `response` fixes a potential NameError when
            # requests.post itself raised before `response` was assigned.
            if response is not None:
                try:
                    error_detail = response.json().get('error', '')
                    if error_detail:
                        error_message += f"\nOllama 错误: {error_detail}"
                except (json.JSONDecodeError, ValueError):
                    # Response body was not valid JSON; keep the generic message.
                    pass
            return error_message
        except json.JSONDecodeError:
            return f"JSON 解析错误：无法解析来自 Ollama 的响应。响应内容: {response.text}"
        except Exception as e:
            return f"发生未知错误: {e}"

    # --- prompt-generation helpers ---
    def pt_with_llm(self, poem_title, poem_author, poem_text, desired_style, aspect_ratio="16:9"):
        """Build the meta-prompt for one poem and ask the LLM for an art prompt.

        Returns the generated prompt string (or an error-message string from
        self.llm); returns None only if an unexpected exception is raised,
        which is printed rather than propagated (best-effort behaviour).
        """
        prompt_for_llm = self.generate_pt(PT1, poem_title, poem_author, poem_text, desired_style, aspect_ratio)
        try:
            # self.llm already returns plain text (or an error message).
            generated_prompt = self.llm(prompt_for_llm)
            return generated_prompt

        except Exception as e:
            print(f"Poem: {poem_title}，调用LLM API时出错: {e}")

    def generate_pt(self, pt, poem_title, poem_author, poem_text, desired_style, aspect_ratio):
        """Compose the meta-prompt handed to the LLM.

        Args:
            pt: Intended system-prompt text. NOTE(review): currently unused —
                the instruction text below is hard-coded instead; confirm
                whether it should replace the first paragraph.
            poem_title / poem_author / poem_text: Poem metadata interpolated
                into the prompt.
            desired_style: Art style appended to the generated prompt.
            aspect_ratio: Value for the trailing "--ar" parameter.
        """
        # The clearer the meta-prompt instructions, the better the output.
        prompt_for_llm = f"""
        你是一个精通中国古诗词和AI绘画提示词的专家。请根据以下诗词信息，生成一段高质量的、适用于AI绘画工具（如Midjourney, Stable Diffusion）的英文Prompt提示词。

        诗词信息：
        标题：{poem_title}
        作者：{poem_author}
        内容：
        {poem_text}

        生成要求：
        1.  **深刻理解诗词**: 准确捕捉诗词的核心意象、情感基调和整体意境 (mood and atmosphere)。
        2.  **提炼视觉元素**: 识别关键的、可被视觉化的主体、场景、物品、颜色、光线等。
        3.  **侧重意境表达**: 使用能够传达诗词情感和氛围的描述性词语 (e.g., serene, melancholic, vast, desolate, nostalgic, vibrant, somber)。
        4.  **英文输出**: 生成的最终Prompt必须是英文。
        5.  **结构清晰**: Prompt应包含主体、场景、氛围、风格等要素，用逗号分隔。
        6.  **包含指定风格**: 在Prompt末尾加入指定的艺术风格: "{desired_style}"。
        7.  **包含画面比例**: 在Prompt末尾加入画面比例参数: "--ar {aspect_ratio}"。
        8.  **简洁有力**: Prompt应尽可能简洁，同时保留关键信息。
        9.  **直接输出Prompt**: 不要添加任何额外的解释或引言，直接给出最终的Prompt字符串。

        示例（仅作参考，请根据实际诗词生成）：
        Poem: "静夜思" by 李白
        Output Prompt: A solitary figure sits by a simple bed in a traditional Chinese room, bright moonlight streams through the window, illuminating the floor like frost. The figure looks up towards the bright moon visible outside, a mood of quiet contemplation and deep homesickness. {desired_style}, serene, melancholic. --ar {aspect_ratio}

        现在，请根据上面提供的诗词生成Prompt：
        """
        return prompt_for_llm

    def parse_poem(self, poems):
        """Generate an art prompt for every poem dict and dump the results
        to ./audios/poems_prompts.json.

        Each entry in *poems* must provide 'title', 'author' and 'text'.
        """
        results_llm = []
        default_style = "cinematic, hyperrealistic illustration"  # swap in any style you prefer
        default_ar = "16:9"

        for poem in poems:
            start_time = time.time()
            # Renamed from `format` to avoid shadowing the builtin.
            started_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
            print(f"开始：{started_at}，model：{self.model}, {poem['title']} - {poem['author']}")
            try:
                llm_prompt = self.pt_with_llm(
                    poem['title'],
                    poem['author'],
                    poem['text'],
                    desired_style=default_style,
                    aspect_ratio=default_ar
                )
                elapsed = time.time() - start_time
                print(f"结束：耗时{int(elapsed)}s")
                results_llm.append({
                    "title": poem['title'],
                    "poem": poem['text'],
                    "style": default_style,
                    "aspect_ratio": default_ar,
                    "prompt": llm_prompt
                })
            except IOError as e:
                print(f"\nError saving LLM prompts to file: {e}")
                # On error, checkpoint whatever has been collected so far.
                with open("./audios/poems_prompts.json", "w", encoding="utf-8") as f:
                    json.dump(results_llm, f, ensure_ascii=False, indent=4)

        with open("./audios/poems_prompts.json", "w", encoding="utf-8") as f:
            json.dump(results_llm, f, ensure_ascii=False, indent=4)
            print("\n保存pt poems_prompts.json")

    # Generate illustration prompts for the primary/middle-school poem set.
    def test_parse_learn_poem(self):
        """Walk ./audios/poem_pinyin.json grade by grade, attach an LLM
        prompt to every poem that has pinyin, and write the enriched list
        to ./audios/poems_prompts.json (checkpointing partial results to
        ./audios/temp_prompts.json on error).
        """
        default_style = "Chinese ink wash painting"  # swap in any style you prefer
        default_ar = "16:9"

        # Explicit UTF-8: the file contains Chinese text, so relying on the
        # locale default encoding is fragile.
        with open("./audios/poem_pinyin.json", "r", encoding="utf-8") as src:
            all_poems = []
            for grade in json.load(src):
                list_poems = []
                for poem in grade['poems']:
                    if poem.get('pinyin') is not None:
                        try:
                            # Generate the prompt for this poem.
                            llm_prompt = self.pt_with_llm(
                                poem['name'],
                                poem['author'],
                                poem['content'],
                                desired_style=default_style,
                                aspect_ratio=default_ar
                            )

                            # Attach the prompt and keep the poem.
                            poem["prompt"] = llm_prompt
                            list_poems.append(poem)
                            print(f"生成 Prompt: {llm_prompt}")
                        except Exception as e:
                            print(f"\nError saving LLM prompts to file: {e}")

                            # Checkpoint a snapshot of the progress so far.
                            # Building a snapshot (instead of appending to
                            # all_poems here) avoids duplicating this grade
                            # when it is appended again after the loop; a
                            # distinct handle avoids shadowing the outer
                            # read handle.
                            snapshot = all_poems + [{'grade': grade['grade'], 'poems': list_poems}]
                            with open("./audios/temp_prompts.json", "w", encoding="utf-8") as tmp:
                                json.dump(snapshot, tmp, ensure_ascii=False, indent=4)
                    else:
                        list_poems.append(poem)

                # Collect the finished grade.
                all_poems.append({'grade': grade['grade'], 'poems': list_poems})

            # ensure_ascii=False for consistency with every other dump above.
            with open("./audios/poems_prompts.json", 'w', encoding="utf-8") as fj:
                json.dump(all_poems, fj, ensure_ascii=False)

    def test_gen_poem_pt(self, title="静夜思", author="李白"):
        """Look one poem up in the SQLite database and run parse_poem on it."""
        self.cursor.execute("SELECT id, name, content FROM poem WHERE name=? AND author=?", (title, author))
        result = self.cursor.fetchone()
        if result:
            poems = [{"title": title, "author": author, "text": result[2], "id": result[0]}]
            self.parse_poem(poems)

    def test_json(self):
        """Smoke test: overwrite the output file with a tiny JSON payload."""
        with open("./audios/poems_prompts.json", 'w') as fj:
            fj.write(json.dumps([{"t": 1}]))
            
# --- Usage example ---
if __name__ == "__main__":
    # llm = LLM_Ollama()
    # llm.test_parse_learn_poem()
    # Scratch run: reset the output file to a tiny JSON payload.
    payload = [{"t": 1}]
    with open("./audios/poems_prompts.json", 'w') as output_file:
        output_file.write(json.dumps(payload))
    
