import sys
import time
from langchain_community.llms import Ollama
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.chains import LLMChain
import requests

def check_ollama_service():
    """Check whether the local Ollama service is reachable.

    Probes the /api/tags endpoint on the default Ollama port.

    Returns:
        bool: True if the service responds with HTTP 200, False on any
        connection problem, timeout, or non-200 status.
    """
    try:
        # timeout prevents the health check from hanging forever when the
        # host is unreachable (a plain requests.get has no default timeout)
        response = requests.get("http://127.0.0.1:11434/api/tags", timeout=5)
        return response.status_code == 200
    except requests.exceptions.RequestException:
        # Covers ConnectionError, Timeout, and other request failures —
        # any of them means the service is not usable.
        return False

def initialize_llm():
    """Build the Ollama-backed LLM, terminating the process if unavailable.

    Exits with status 1 (after printing guidance) when the Ollama service
    is not running or the model cannot be constructed.

    Returns:
        Ollama: a configured client for the deepseek-r1:14b model.
    """
    if not check_ollama_service():
        print("错误: Ollama 服务未运行。请先启动 Ollama 服务。")
        print("启动命令: ollama serve")
        sys.exit(1)

    try:
        return Ollama(
            base_url="http://127.0.0.1:11434",
            model="deepseek-r1:14b",
            timeout=30,  # request timeout in seconds
        )
    except Exception as e:
        print(f"初始化模型时出错: {str(e)}")
        sys.exit(1)

def run_prompt_example(chain, input_data, description):
    """Format, invoke, and print one prompt/chain example.

    Prints a header, the rendered prompt, and the model's answer.
    Any exception raised while formatting or invoking is caught and
    reported on stdout instead of propagating.

    Args:
        chain: an LLMChain-like object exposing ``prompt`` and ``invoke``.
        input_data: mapping of template variables for the prompt.
        description: human-readable label printed above the example.
    """
    try:
        print(f"\n=== {description} ===")
        rendered = chain.prompt.format(**input_data)
        print(f"问题: {rendered}")
        result = chain.invoke(input_data)
        print(f"回答: {result['text']}")
    except Exception as err:
        print(f"运行示例时出错: {str(err)}")

def main():
    """Run four LangChain prompt-template demos against a local Ollama model."""
    model = initialize_llm()

    # 1. Plain PromptTemplate: one templated question.
    product_prompt = PromptTemplate(
        input_variables=["product"],
        template="你是一个产品专家，请详细介绍这个产品：{product}",
    )
    basic_chain = LLMChain(llm=model, prompt=product_prompt)

    # 2. ChatPromptTemplate: system role plus a human message.
    analyst_prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一个专业的产品经理，请用专业的角度来分析产品"),
        ("human", "介绍下{product}的{aspect}特点"),
    ])
    chat_chain = LLMChain(llm=model, prompt=analyst_prompt)

    # 3. FewShotPromptTemplate: antonym examples steer the completion.
    antonym_examples = [
        {"input": "快乐", "output": "悲伤"},
        {"input": "黑", "output": "白"},
    ]
    antonym_example_prompt = PromptTemplate(
        input_variables=["input", "output"],
        template="输入: {input}\n输出: {output}",
    )
    antonym_prompt = FewShotPromptTemplate(
        examples=antonym_examples,
        example_prompt=antonym_example_prompt,
        prefix="给出下面词的反义词",
        suffix="输入: {input}\n输出:",
        input_variables=["input"],
    )
    few_shot_chain = LLMChain(llm=model, prompt=antonym_prompt)

    # 4. PipelinePromptTemplate: three sub-prompts composed into one.
    composed_template = """
    {introduction}
    {example}
    {question}
    """
    role_prompt = PromptTemplate.from_template("你现在扮演的角色是: {role}")
    demo_prompt = PromptTemplate.from_template("参考示例: {example}")
    query_prompt = PromptTemplate.from_template("请回答问题: {question}")

    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=PromptTemplate.from_template(composed_template),
        pipeline_prompts=[
            ("introduction", role_prompt),
            ("example", demo_prompt),
            ("question", query_prompt),
        ],
    )
    pipeline_chain = LLMChain(llm=model, prompt=pipeline_prompt)

    # Exercise each chain with a sample input.
    run_prompt_example(
        basic_chain,
        {"product": "iPhone"},
        "1. 基础 Prompt 调用示例",
    )

    run_prompt_example(
        chat_chain,
        {"product": "MacBook Pro", "aspect": "设计"},
        "2. Chat Prompt 调用示例",
    )

    run_prompt_example(
        few_shot_chain,
        {"input": "上"},
        "3. Few-Shot 示例调用",
    )

    run_prompt_example(
        pipeline_chain,
        {
            "role": "数学老师",
            "example": "当问到1+1时，我会详细解释：1+1=2，因为这是最基本的加法运算",
            "question": "请解释17+24的计算过程",
        },
        "4. Pipeline Prompt 调用示例",
    )

# Script entry point: only run the demos when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()