-------------------------------------------------------------------------------------------------------------


from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser

# Prompt template that asks for a joke about the injected {topic}.
prompt = ChatPromptTemplate.from_template("给我讲一个关于{topic}的笑话")

# Chat model (requires OPENAI_API_KEY in the environment).
model = ChatOpenAI(model="gpt-4")

# Parser that extracts the plain string from the model's message.
output_parser = StrOutputParser()

# Compose the LCEL chain with the pipe operator: prompt -> model -> parser.
chain = prompt | model | output_parser

# Invoke the chain; the dict fills the {topic} placeholder.
result = chain.invoke({"topic": "冰淇淋"})
print(result)


# Input payload for the step-by-step run below.
input_data = {"topic": "冰淇淋"}

# Step 1: render the prompt only (produces a PromptValue).
prompt_value = prompt.invoke(input_data)
print(prompt_value)

# Step 2: run the model on the rendered prompt (produces a message).
message = model.invoke(prompt_value)
print(message)

# Step 3: parse the message down to its plain string content.
final_output = output_parser.invoke(message)
print(final_output)


from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_openai.embeddings import OpenAIEmbeddings

# Build an in-memory vector store from two sample sentences.
vectorstore = DocArrayInMemorySearch.from_texts(
    ["张三在腾讯工作", "熊猫喜欢吃竹子"],
    embedding=OpenAIEmbeddings()
)

# A raw vector store is not a Runnable, so the original `vectorstore | prompt`
# composition raised a TypeError. Expose it as a retriever and use a
# RAG-style prompt that accepts the retrieved context plus the question.
retriever = vectorstore.as_retriever()
rag_prompt = ChatPromptTemplate.from_template(
    "根据以下上下文回答问题:\n{context}\n\n问题: {question}"
)

# Retrieval chain: fan the question out into {context, question}, then
# prompt -> model -> parser.
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | rag_prompt
    | model
    | output_parser
)

# The retriever takes the raw question string as its input.
result = chain.invoke("熊猫的饮食习惯")
print(result)


-------------------------------------------------------------------------------------------------------------


from langchain.llms import OpenAI
# Initialize the LLM with streaming enabled.
# NOTE(review): `text-davinci-003` has been retired by OpenAI, and the
# langchain `OpenAI` wrapper's parameter is `streaming=True`, not
# `stream=True` — confirm against the installed langchain version.
llm = OpenAI(model_name="text-davinci-003", stream=True)


# Callback that prints each streamed chunk's text as it arrives.
def stream_callback(response):
    for piece in response:
        text = piece["choices"][0]["text"]
        print("流式内容:", text, end="")

# Example: request a completion and process the streamed output.
# NOTE(review): calling `llm(...)` directly is deprecated, and `callback=` is
# not a parameter the langchain OpenAI wrapper accepts — verify this API.
prompt = "请简要描述机器学习的原理。"  # rebinds `prompt` from template to plain string
response = llm(prompt, callback=stream_callback)


# Callback that echoes each streamed chunk, then prints the joined full text.
def aggregate_streamed_data(response):
    pieces = []
    for piece in response:
        text = piece["choices"][0]["text"]
        pieces.append(text)
        print("当前分片内容:", text)

    print("完整生成内容:", "".join(pieces))
# Example: request a completion and aggregate the streamed output.
# NOTE(review): `callback=` is not a standard langchain parameter — verify.
response = llm(prompt, callback=aggregate_streamed_data)


from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Conversation chain with buffer memory for the streaming demo.
# NOTE(review): ConversationChain is a pydantic model and may reject the
# unknown `stream` field — confirm against the installed langchain version.
memory = ConversationBufferMemory()
conversation_chain = ConversationChain(llm=llm, memory=memory, stream=True)

# Callback that prints each streamed conversation chunk as it arrives.
def real_time_feedback(response):
    for piece in response:
        text = piece["choices"][0]["text"]
        print("流式对话回复:", text, end="")

# Example: stream a conversational reply.
# NOTE(review): Chain.run does not take a `callback` kwarg (callback handlers
# are passed via `callbacks=[...]`) — verify this call.
user_input = "你能简单介绍一下深度学习吗？"
response = conversation_chain.run(user_input, callback=real_time_feedback)


import time

# Callback that prints streamed chunks with a configurable pacing delay.
def delayed_streaming(response, delay=0.5):
    for piece in response:
        text = piece["choices"][0]["text"]
        print("流式内容:", text, end="")
        time.sleep(delay)  # throttle how fast chunks are shown

# Example: stream with a 0.3s delay between chunks.
response = llm(prompt, callback=lambda x: delayed_streaming(x, delay=0.3))


-------------------------------------------------------------------------------------------------------------


from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel
# Initialize the chat model (requires OPENAI_API_KEY).
model = ChatOpenAI(model="gpt-4")

# Three independent prompt templates (none takes input variables).
prompt1 = ChatPromptTemplate.from_template("请解释机器学习的基本概念。")
prompt2 = ChatPromptTemplate.from_template("深度学习和机器学习的区别是什么？")
prompt3 = ChatPromptTemplate.from_template("人工智能有哪些主要应用？")

# Parser that extracts the plain string reply.
output_parser = StrOutputParser()


# Build the parallel flow with RunnableParallel: all three chains run
# concurrently on the same input.
parallel_tasks = RunnableParallel(
    {
        "task1": prompt1 | model | output_parser,
        "task2": prompt2 | model | output_parser,
        "task3": prompt3 | model | output_parser
    }
)
# Execute the parallel tasks with an empty input (the prompts need none).
results = parallel_tasks.invoke({})

# Print each task's generated content.
for task_name, result in results.items():
    print(f"{task_name} 生成内容: {result}")


# Topics to generate one task apiece for.
topics = ["机器学习", "深度学习", "人工智能"]

# Dynamically build a prompt->model->parser chain per topic, keyed task_1..n.
dynamic_tasks = {}
for index, topic in enumerate(topics, start=1):
    dynamic_tasks[f"task_{index}"] = (
        ChatPromptTemplate.from_template(f"请解释{topic}的基本概念。") | model | output_parser
    )

# Run every dynamically built task in parallel.
parallel_dynamic_tasks = RunnableParallel(dynamic_tasks)
dynamic_results = parallel_dynamic_tasks.invoke({})

# Print each task's generated content.
for task_name, result in dynamic_results.items():
    print(f"{task_name} 生成内容: {result}")


# Run a task chain, converting any exception into an error string.
def safe_task(task):
    try:
        outcome = task.invoke({})
    except Exception as exc:
        return f"任务失败: {exc}"
    return outcome

# Error-managed parallel flow.
# NOTE: RunnableParallel invokes every branch with the chain input, so each
# lambda must accept exactly one argument; the original zero-arg lambdas
# raised TypeError at invoke time.
parallel_tasks_with_error_handling = RunnableParallel(
    {
        "task1": lambda _: safe_task(prompt1 | model | output_parser),
        "task2": lambda _: safe_task(prompt2 | model | output_parser),
        "task3": lambda _: safe_task(prompt3 | model | output_parser)
    }
)

# Run the error-managed parallel flow.
results_with_error_handling = parallel_tasks_with_error_handling.invoke({})
for task_name, result in results_with_error_handling.items():
    print(f"{task_name} 生成内容: {result}")


# Batch-processing example.
# `batch` is a method on every Runnable, not an importable symbol, so the
# original `from langchain_core.runnables import batch` raised ImportError.
from langchain_core.runnables import RunnableLambda

# Wrap .batch() in a Runnable so the batched flow can itself be invoked;
# it maps the error-managed flow over a list of inputs.
batched_parallel_tasks = RunnableLambda(
    lambda inputs: parallel_tasks_with_error_handling.batch(inputs)
)

# Execute the batched tasks: one result dict per batch input.
for batch_result in batched_parallel_tasks.invoke([{}]):
    for task_name, result in batch_result.items():
        print(f"{task_name} 生成内容: {result}")


from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# `batch` is a Runnable method, not an importable symbol (the original import
# raised ImportError); RunnableLambda is imported instead so the batched flow
# below can be expressed as a Runnable.
from langchain_core.runnables import RunnableParallel, RunnableLambda

# Initialize the chat model.
model = ChatOpenAI(model="gpt-4")

# Parser that extracts the plain string reply.
output_parser = StrOutputParser()

# Prompt templates (none takes input variables).
prompt1 = ChatPromptTemplate.from_template("请解释机器学习的基本概念。")
prompt2 = ChatPromptTemplate.from_template("深度学习和机器学习的区别是什么？")
prompt3 = ChatPromptTemplate.from_template("人工智能有哪些主要应用？")

# Basic parallel flow: the three chains run concurrently on one input.
parallel_tasks = RunnableParallel(
    {
        "task1": prompt1 | model | output_parser,
        "task2": prompt2 | model | output_parser,
        "task3": prompt3 | model | output_parser
    }
)

# Dynamically generated tasks, one per topic.
topics = ["机器学习", "深度学习", "人工智能"]
dynamic_tasks = {
    f"task_{i+1}": ChatPromptTemplate.from_template(f"请解释{topic}的基本概念。") | model | output_parser
    for i, topic in enumerate(topics)
}
parallel_dynamic_tasks = RunnableParallel(dynamic_tasks)

# Run a task chain, converting any exception into an error string.
def safe_task(task):
    try:
        return task.invoke({})
    except Exception as e:
        return f"任务失败: {e}"

# Error-managed parallel flow. Each branch is invoked with the chain input,
# so every lambda must accept one argument; zero-arg lambdas would raise.
parallel_tasks_with_error_handling = RunnableParallel(
    {
        "task1": lambda _: safe_task(prompt1 | model | output_parser),
        "task2": lambda _: safe_task(prompt2 | model | output_parser),
        "task3": lambda _: safe_task(prompt3 | model | output_parser)
    }
)

# Batched flow: .batch() maps the error-managed flow over a list of inputs.
batched_parallel_tasks = RunnableLambda(
    lambda inputs: parallel_tasks_with_error_handling.batch(inputs)
)

# Smoke-test every flow defined above.
def test_all_tasks():
    print("执行基础并行任务流:")
    results = parallel_tasks.invoke({})
    for task_name, result in results.items():
        print(f"{task_name} 生成内容: {result}")

    print("\n执行动态并行任务流:")
    dynamic_results = parallel_dynamic_tasks.invoke({})
    for task_name, result in dynamic_results.items():
        print(f"{task_name} 生成内容: {result}")

    print("\n执行带错误管理的并行任务流:")
    results_with_error_handling = parallel_tasks_with_error_handling.invoke({})
    for task_name, result in results_with_error_handling.items():
        print(f"{task_name} 生成内容: {result}")

    print("\n执行批处理并行任务流:")
    # The batched flow takes a list of inputs and yields one result dict each.
    for batch_result in batched_parallel_tasks.invoke([{}]):
        for task_name, result in batch_result.items():
            print(f"{task_name} 生成内容: {result}")

# Run the smoke test.
test_all_tasks()


-------------------------------------------------------------------------------------------------------------


from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel

# Initialize the chat model (requires OPENAI_API_KEY).
model = ChatOpenAI(model="gpt-4")

# Prompt templates for three independent tasks (no input variables).
prompt1 = ChatPromptTemplate.from_template("请解释机器学习的基本概念。")
prompt2 = ChatPromptTemplate.from_template("深度学习和机器学习的区别是什么？")
prompt3 = ChatPromptTemplate.from_template("列出人工智能的主要应用。")

# Parser that extracts the plain string reply.
output_parser = StrOutputParser()


# Parallel flow: all three chains run concurrently on the same input.
parallel_tasks = RunnableParallel(
    {
        "task1": prompt1 | model | output_parser,
        "task2": prompt2 | model | output_parser,
        "task3": prompt3 | model | output_parser
    }
)
# Execute the parallel tasks with an empty input (the prompts need none).
results = parallel_tasks.invoke({})

# Print each task's output.
for task_name, result in results.items():
    print(f"{task_name} 输出内容: {result}")


# `batch` is a Runnable method, not an importable symbol; the original import
# raised ImportError. Wrap .batch() in a RunnableLambda so the batched flow
# can still be invoked like a Runnable (a list of inputs goes in, a list of
# result dicts comes out).
from langchain_core.runnables import RunnableLambda

batched_parallel_tasks = RunnableLambda(lambda inputs: parallel_tasks.batch(inputs))

# Execute the batched parallel tasks (three empty inputs as examples).
batched_results = batched_parallel_tasks.invoke([{}, {}, {}])
for index, result in enumerate(batched_results):
    print(f"批次 {index+1} 的结果:")
    for task_name, output in result.items():
        print(f"  {task_name}: {output}")


# Execute a task chain safely; failures come back as an error string.
def safe_task(task):
    try:
        return task.invoke({})
    except Exception as problem:
        return f"任务失败: {problem}"

# Error-managed parallel flow.
# NOTE: RunnableParallel calls each branch with the chain input, so every
# lambda must accept exactly one argument; the original zero-arg lambdas
# raised TypeError at invoke time.
safe_parallel_tasks = RunnableParallel(
    {
        "task1": lambda _: safe_task(prompt1 | model | output_parser),
        "task2": lambda _: safe_task(prompt2 | model | output_parser),
        "task3": lambda _: safe_task(prompt3 | model | output_parser)
    }
)

# Run the error-managed parallel tasks.
safe_results = safe_parallel_tasks.invoke({})
for task_name, result in safe_results.items():
    print(f"{task_name} 输出内容: {result}")


# Dynamically build one explanation task per topic, keyed task_1..task_n.
topics = ["机器学习", "深度学习", "人工智能"]
dynamic_tasks = {}
for index, topic in enumerate(topics, start=1):
    dynamic_tasks[f"task_{index}"] = (
        ChatPromptTemplate.from_template(f"请解释{topic}") | model | output_parser
    )

# Run the dynamically generated tasks in parallel and print each result.
parallel_dynamic_tasks = RunnableParallel(dynamic_tasks)
dynamic_results = parallel_dynamic_tasks.invoke({})
for task_name, result in dynamic_results.items():
    print(f"{task_name} 输出内容: {result}")


def test_parallel_execution():
    """Exercise every parallel-flow variant defined above and print results."""
    def _show(results):
        for task_name, result in results.items():
            print(f"{task_name} 输出内容: {result}")

    print("执行基础并行任务流:")
    _show(parallel_tasks.invoke({}))

    print("\n执行批量并行任务流:")
    for index, batch_result in enumerate(batched_parallel_tasks.invoke([{}, {}, {}])):
        print(f"批次 {index+1} 的结果:")
        for task_name, output in batch_result.items():
            print(f"  {task_name}: {output}")

    print("\n执行带错误处理的并行任务流:")
    _show(safe_parallel_tasks.invoke({}))

    print("\n执行动态生成的并行任务流:")
    _show(parallel_dynamic_tasks.invoke({}))

# Run the smoke test.
test_parallel_execution()


-------------------------------------------------------------------------------------------------------------


from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatAnthropic

# Primary model with retries disabled so failures surface immediately.
primary_model = ChatOpenAI(max_retries=0)
# NOTE(review): ChatAnthropic() needs ANTHROPIC_API_KEY and may require an
# explicit model name — confirm against the installed version.
fallback_model = ChatAnthropic()

# Route to the fallback model whenever the primary model raises.
model_with_fallback = primary_model.with_fallbacks([fallback_model])


-------------------------------------------------------------------------------------------------------------


from unittest.mock import patch
from openai import RateLimitError

# Build a sample rate-limit error.
# NOTE(review): in openai>=1.0 RateLimitError also requires `response` and
# `body` keyword arguments; this one-argument form only works on older clients.
error = RateLimitError("rate limit exceeded")

# Simulate the API error and exercise the fallback.
# NOTE(review): `openai.ChatCompletion.create` is the legacy (<1.0) endpoint;
# langchain_openai uses the v1 client, so this patch target may not intercept
# the request — verify against the installed openai version.
with patch("openai.ChatCompletion.create", side_effect=error):
    try:
        response = model_with_fallback.invoke("什么是机器学习？")
        print("回退成功，输出内容:", response)
    except RateLimitError:
        print("回退机制失败")


-------------------------------------------------------------------------------------------------------------


from langchain_core.prompts import ChatPromptTemplate

# Prompt template for the fallback-chain demo.
prompt = ChatPromptTemplate.from_template("请解释 {topic} 的基本概念。")

# Compose the prompt with the fallback-enabled model.
chain_with_fallback = prompt | model_with_fallback

# Exercise the chain under a simulated rate-limit error.
# NOTE(review): `openai.ChatCompletion.create` is the legacy (<1.0) patch
# target and may not intercept v1 client requests — verify.
with patch("openai.ChatCompletion.create", side_effect=error):
    try:
        response = chain_with_fallback.invoke({"topic": "机器学习"})
        print("回退成功，生成内容:", response)
    except RateLimitError:
        print("回退机制失败")


-------------------------------------------------------------------------------------------------------------


# Fall back only when one of the listed exception types is raised.
model_with_specific_fallback = primary_model.with_fallbacks(
    [fallback_model],
    exceptions_to_handle=(RateLimitError,)
)

# Exercise the type-specific fallback under a simulated rate-limit error.
# NOTE(review): patch target is the legacy (<1.0) openai endpoint — verify.
with patch("openai.ChatCompletion.create", side_effect=error):
    try:
        response = model_with_specific_fallback.invoke("什么是深度学习？")
        print("回退成功，生成内容:", response)
    except RateLimitError:
        print("回退机制失败")


-------------------------------------------------------------------------------------------------------------


from langchain_core.output_parsers import StrOutputParser

# Prompt template and parser shared by both chains.
prompt = ChatPromptTemplate.from_template("解释 {topic} 的重要性。")
output_parser = StrOutputParser()

# Primary and backup chains over the same prompt.
primary_chain = prompt | primary_model | output_parser
backup_chain = prompt | fallback_model | output_parser

# Chain-level fallback: run the backup chain when the primary chain raises.
chain_with_fallback_sequence = primary_chain.with_fallbacks([backup_chain])

# Exercise the fallback sequence under a simulated rate-limit error.
# NOTE(review): patch target is the legacy (<1.0) openai endpoint — verify.
with patch("openai.ChatCompletion.create", side_effect=error):
    try:
        result = chain_with_fallback_sequence.invoke({"topic": "人工智能"})
        print("回退成功，生成内容:", result)
    except RateLimitError:
        print("回退机制失败")


-------------------------------------------------------------------------------------------------------------


# Smoke-test every fallback configuration defined above.
def test_fallback_mechanism():
    print("测试回退机制的设计与实现:")

    # Simulate a rate-limit error for all requests within this context.
    # NOTE(review): openai>=1.0 RateLimitError also requires `response`/`body`
    # kwargs, and ChatCompletion.create is the legacy endpoint — verify both.
    with patch("openai.ChatCompletion.create", side_effect=RateLimitError("rate limit exceeded")):
        try:
            # Basic model-level fallback.
            response = model_with_fallback.invoke("什么是机器学习？")
            print("基本回退成功，输出内容:", response)
        except RateLimitError:
            print("基本回退机制失败")

        # Prompt-template chain fallback.
        try:
            response = chain_with_fallback.invoke({"topic": "机器学习"})
            print("任务流回退成功，生成内容:", response)
        except RateLimitError:
            print("任务流回退机制失败")

        # Exception-type-specific fallback.
        try:
            response = model_with_specific_fallback.invoke("什么是深度学习？")
            print("指定错误回退成功，生成内容:", response)
        except RateLimitError:
            print("指定错误回退机制失败")

        # Multi-model sequence-chain fallback.
        try:
            result = chain_with_fallback_sequence.invoke({"topic": "人工智能"})
            print("序列链回退成功，生成内容:", result)
        except RateLimitError:
            print("序列链回退机制失败")

# Run the fallback smoke test.
test_fallback_mechanism()


-------------------------------------------------------------------------------------------------------------


>> pip install -U langsmith
>> export LANGCHAIN_TRACING_V2=true
>> export LANGCHAIN_API_KEY=<your-api-key>
>> export OPENAI_API_KEY=<your-openai-api-key>


import openai
from langsmith.wrappers import wrap_openai
from langsmith import traceable

# Wrap the OpenAI client so every request is traced by LangSmith.
client = wrap_openai(openai.Client())

# @traceable records this function as a traced run in LangSmith.
@traceable
def pipeline(user_input: str):
    """Send *user_input* to gpt-3.5-turbo and return the reply text."""
    chat_messages = [{"role": "user", "content": user_input}]
    completion = client.chat.completions.create(
        messages=chat_messages,
        model="gpt-3.5-turbo"
    )
    return completion.choices[0].message.content

# Run the traced task.
response = pipeline("什么是机器学习？")
print("生成内容:", response)


from langsmith import Client
from langsmith.evaluation import evaluate

# LangSmith client (requires LANGCHAIN_API_KEY in the environment).
client = Client()

# Create a dataset with paired example inputs and expected outputs.
dataset_name = "示例数据集"
dataset = client.create_dataset(dataset_name, description="LangSmith示例数据集")
client.create_examples(
    inputs=[{"postfix": "到LangSmith"}, {"postfix": "到LangSmith评估"}],
    outputs=[{"output": "欢迎到LangSmith"}, {"output": "欢迎到LangSmith评估"}],
    dataset_id=dataset.id,
)

# Evaluator: score True when the run's output exactly equals the example's.
def exact_match(run, example):
    expected = example.outputs["output"]
    actual = run.outputs["output"]
    return {"score": actual == expected}

# Run the evaluation: the target builds "欢迎 " + postfix for each example.
# (Lambda parameter renamed from `input`, which shadowed the builtin.)
experiment_results = evaluate(
    lambda example_input: "欢迎 " + example_input['postfix'],
    data=dataset_name,
    evaluators=[exact_match],
    experiment_prefix="示例实验",
    metadata={"version": "1.0.0", "revision_id": "beta"}
)
print("评估结果:", experiment_results)


-------------------------------------------------------------------------------------------------------------


import openai
from langsmith import traceable
# NOTE(review): wrap_openai lives in langsmith.wrappers; importing it from the
# top-level `langsmith` package fails on current releases.
from langsmith.wrappers import wrap_openai

# Wrap the OpenAI client so calls are traced by LangSmith.
client = wrap_openai(openai.Client())

# Multi-step traced pipeline: answer the question, then summarize the answer.
@traceable
def process_pipeline(user_input: str):
    """Return the initial gpt-3.5-turbo reply and a summary of it."""
    def _chat(content):
        # One chat-completion round trip for the given user content.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": content}]
        )
        return completion.choices[0].message.content

    # Step 1: initial reply.
    initial_response = _chat(user_input)
    # Step 2: summarize the reply.
    summary_response = _chat(f"总结以下内容: {initial_response}")

    return {"initial_response": initial_response, "summary_response": summary_response}

# Run the pipeline; LangSmith records the trace automatically.
results = process_pipeline("什么是机器学习？")
print("任务执行结果:", results)


# Optimized variant: lower temperature for step 1, capped tokens for step 2.
@traceable
def optimized_pipeline(user_input: str):
    """Return the initial reply and a short summary, tuned for speed."""
    def _chat(content, **options):
        # One chat-completion round trip with per-step sampling options.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": content}],
            **options
        )
        return completion.choices[0].message.content

    # Step 1: lower temperature reduces sampling time.
    initial_response = _chat(user_input, temperature=0.3)
    # Step 2: cap the summary at 50 tokens to speed it up.
    summary_response = _chat(f"总结以下内容: {initial_response}", max_tokens=50)

    return {"initial_response": initial_response, "summary_response": summary_response}

# Run the optimized pipeline.
optimized_results = optimized_pipeline("什么是机器学习？")
print("优化任务执行结果:", optimized_results)


from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatAnthropic

# Primary model with retries disabled; Anthropic as the fallback.
primary_model = ChatOpenAI(max_retries=0)
fallback_model = ChatAnthropic()

# Traced pipeline that falls back to the secondary model on any error.
@traceable
def pipeline_with_fallback(user_input: str):
    # Chat models accept a message list (or a plain string); the original
    # passed a bare dict, which is not a valid chat-model input type.
    messages = [{"role": "user", "content": user_input}]
    try:
        response = primary_model.invoke(messages)
    except Exception:
        print("主模型失败，切换至备用模型")
        response = fallback_model.invoke(messages)
    return response

# Run the fallback pipeline and record the trace.
fallback_results = pipeline_with_fallback("描述人工智能的应用场景")
print("回退任务执行结果:", fallback_results)


from langsmith import Client
from langsmith.evaluation import evaluate

# Initialize the LangSmith client (requires LANGCHAIN_API_KEY).
client = Client()

# Dataset of paired inputs and expected outputs for batch evaluation.
dataset_name = "机器学习批量评估"
dataset = client.create_dataset(dataset_name, description="LangSmith批量任务评估示例")
client.create_examples(
    inputs=[{"postfix": "到LangSmith"}, {"postfix": "到LangSmith评估"}],
    outputs=[{"output": "欢迎到LangSmith"}, {"output": "欢迎到LangSmith评估"}],
    dataset_id=dataset.id,
)

# Batch evaluator: True when the run output matches the example output.
def exact_match(run, example):
    return {"score": example.outputs["output"] == run.outputs["output"]}

# Run the batch evaluation: the target builds "欢迎 " + postfix per example.
# (Lambda parameter renamed from `input`, which shadowed the builtin.)
experiment_results = evaluate(
    lambda example_input: "欢迎 " + example_input['postfix'],
    data=dataset_name,
    evaluators=[exact_match],
    experiment_prefix="批量任务评估",
    metadata={"version": "1.0.0", "revision_id": "beta"}
)
print("批量评估结果:", experiment_results)


import openai
from unittest.mock import patch

from openai import RateLimitError

from langchain_community.chat_models import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langsmith import Client, traceable
from langsmith.evaluation import evaluate
# NOTE(review): wrap_openai lives in langsmith.wrappers; importing it from the
# top-level `langsmith` package fails on current releases.
from langsmith.wrappers import wrap_openai

# Wrap the OpenAI client so calls are traced by LangSmith.
client = wrap_openai(openai.Client())

# Plain LangSmith client for dataset management.
langsmith_client = Client()

# Primary model (no retries, so errors surface immediately) plus an Anthropic
# fallback; with_fallbacks routes to the fallback when the primary raises.
primary_model = ChatOpenAI(max_retries=0)
fallback_model = ChatAnthropic()
model_with_fallback = primary_model.with_fallbacks([fallback_model])

# Dataset of example inputs/outputs for the batch evaluation below.
dataset_name = "机器学习批量评估"
dataset = langsmith_client.create_dataset(dataset_name, description="LangSmith批量任务评估示例")
langsmith_client.create_examples(
    inputs=[{"postfix": "到LangSmith"}, {"postfix": "到LangSmith评估"}],
    outputs=[{"output": "欢迎到LangSmith"}, {"output": "欢迎到LangSmith评估"}],
    dataset_id=dataset.id,
)

# Multi-step traced pipeline: answer the question, then summarize the answer.
@traceable
def process_pipeline(user_input: str):
    """Return the initial gpt-3.5-turbo reply and a summary of it."""
    def _chat(content):
        # One chat-completion round trip for the given user content.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": content}]
        )
        return completion.choices[0].message.content

    # Step 1: initial reply.
    initial_response = _chat(user_input)
    # Step 2: summarize the reply.
    summary_response = _chat(f"总结以下内容: {initial_response}")

    return {"initial_response": initial_response, "summary_response": summary_response}

# Traced pipeline that falls back to the secondary model on any error.
@traceable
def pipeline_with_fallback(user_input: str):
    # Chat models accept a message list (or a plain string); the original
    # passed a bare dict, which is not a valid chat-model input type.
    messages = [{"role": "user", "content": user_input}]
    try:
        response = primary_model.invoke(messages)
    except Exception:
        print("主模型失败，切换至备用模型")
        response = fallback_model.invoke(messages)
    return response

# Batch evaluator: exact string match between run and example outputs.
def exact_match(run, example):
    got = run.outputs["output"]
    want = example.outputs["output"]
    return {"score": got == want}

# End-to-end smoke test: tracing, fallback, then batch evaluation.
def test_langsmith_integration():
    # Traced multi-step pipeline.
    print("执行追踪任务:")
    print("追踪任务结果:", process_pipeline("什么是机器学习？"))

    # Fallback pipeline.
    print("\n执行回退任务:")
    print("回退任务结果:", pipeline_with_fallback("描述人工智能的应用场景"))

    # Batch evaluation over the dataset.
    print("\n执行批量评估:")
    outcome = evaluate(
        lambda example_input: "欢迎 " + example_input['postfix'],
        data=dataset_name,
        evaluators=[exact_match],
        experiment_prefix="批量任务评估",
        metadata={"version": "1.0.0", "revision_id": "beta"}
    )
    print("批量评估结果:", outcome)

# Run the integration smoke test.
test_langsmith_integration()