-------------------------------------------------------------------------------------------------------------


from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain


class CustomCallbackHandler(BaseCallbackHandler):
    """Callback handler that echoes every LLM/chain lifecycle event to stdout.

    Each hook prints a fixed Chinese label followed by the event payload so
    the chain's progress can be followed in real time.
    """

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Fired when the LLM request begins; `prompts` is the prompt list."""
        label = "LLM任务开始。提示内容:"
        print(label, prompts)

    def on_llm_new_token(self, token, **kwargs):
        """Fired per streamed token (only when the LLM streams its output)."""
        label = "生成新Token:"
        print(label, token)

    def on_llm_end(self, response, **kwargs):
        """Fired when the LLM request completes with the full result."""
        label = "LLM任务结束。生成的内容:"
        print(label, response.generations)

    def on_chain_start(self, serialized, inputs, **kwargs):
        """Fired when the chain starts; `inputs` is the chain's input mapping."""
        label = "链开始。输入内容:"
        print(label, inputs)

    def on_chain_end(self, outputs, **kwargs):
        """Fired when the chain finishes with its output mapping."""
        label = "链结束。输出内容:"
        print(label, outputs)


# Prompt template: generate an answer for the given question.
template = "根据以下输入生成答案：{question}"
prompt = PromptTemplate(template=template, input_variables=["question"])

# Initialize the language model. `streaming=True` is required for the
# handler's `on_llm_new_token` hook to fire at all — without streaming the
# token callback is never invoked and only start/end events are seen.
llm = OpenAI(temperature=0, streaming=True)


# Instantiate the callback handler.
callback_handler = CustomCallbackHandler()

# Build an LLMChain wired with the callback handler.
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[callback_handler]
)


# Run the chain with a Chinese input.
response = chain.run({"question": "今天天气怎么样？"})
print("链的最终输出:", response)


-------------------------------------------------------------------------------------------------------------


# Install the dependencies first: pip install langchain openai


from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
import asyncio
class CustomChainCallbackHandler(BaseCallbackHandler):
    """Logs both chain-level and LLM-level lifecycle events to stdout."""

    def _log(self, label, payload):
        # Single funnel so every hook reports in the same "label payload" form.
        print(label, payload)

    def on_chain_start(self, serialized, inputs, **kwargs):
        """Fired when the chain begins executing."""
        self._log("链开始。输入内容:", inputs)

    def on_chain_end(self, outputs, **kwargs):
        """Fired when the chain finishes executing."""
        self._log("链结束。输出内容:", outputs)

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Fired when the LLM request starts."""
        self._log("LLM任务开始。提示内容:", prompts)

    def on_llm_new_token(self, token, **kwargs):
        """Fired per streamed token (needs a streaming-enabled LLM)."""
        self._log("生成新Token:", token)

    def on_llm_end(self, response, **kwargs):
        """Fired when the LLM request completes."""
        self._log("LLM任务结束。生成的内容:", response.generations)

# Prompt template that answers the supplied question.
template = "根据以下输入生成答案：{question}"
prompt = PromptTemplate(input_variables=["question"], template=template)

# Deterministic language model (temperature 0).
llm = OpenAI(temperature=0)

# Instance of the custom callback handler.
callback_handler = CustomChainCallbackHandler()

# LLM chain with the callback registered at construction time.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[callback_handler])

# Run the chain on a Chinese question and show the result.
response = chain.run({"question": "今天的新闻头条是什么？"})
print("链的最终输出:", response)


from langchain.callbacks.base import AsyncCallbackHandler


class AsyncCustomCallbackHandler(AsyncCallbackHandler):
    """Async callback handler for streamed tokens.

    LangChain only awaits `async def` hooks on handlers derived from
    `AsyncCallbackHandler`; on a plain `BaseCallbackHandler` the coroutine
    returned by an async method is never awaited, so the original class
    silently did nothing.
    """

    async def on_llm_new_token(self, token: str, **kwargs):
        # Fires per token only when the LLM streams its output.
        print(f"异步生成的新Token: {token}")

# Instantiate the async callback handler.
async_callback_handler = AsyncCustomCallbackHandler()

# Async driver: builds an LLMChain with the async callback and runs it
# through the chain's async entry point `arun`.
async def run_async_chain():
    async_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callbacks=[async_callback_handler]
    )
    response = await async_chain.arun({"question": "讲个关于科技的笑话"})
    print("链的最终异步输出:", response)

# `await` at module top level is a SyntaxError in a plain .py script (it
# only works in notebooks / async REPLs); drive the coroutine with
# asyncio.run instead (asyncio is imported above).
asyncio.run(run_async_chain())


-------------------------------------------------------------------------------------------------------------


from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain


class SingleCallbackHandler(BaseCallbackHandler):
    """One handler covering both LLM-level and chain-level events."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Invoked when the LLM request starts."""
        print("LLM任务开始。提示内容:", prompts)

    def on_llm_new_token(self, token, **kwargs):
        """Invoked per streamed token (needs a streaming-enabled LLM)."""
        print("生成新Token:", token)

    def on_llm_end(self, response, **kwargs):
        """Invoked when the LLM request completes."""
        print("LLM任务结束。生成的内容:", response.generations)

    def on_chain_start(self, serialized, inputs, **kwargs):
        """Invoked when the chain starts."""
        print("链开始。输入内容:", inputs)

    def on_chain_end(self, outputs, **kwargs):
        """Invoked when the chain finishes."""
        print("链结束。输出内容:", outputs)


# Prompt template for question answering.
template = "回答以下问题：{question}"
prompt = PromptTemplate(input_variables=["question"], template=template)

# Deterministic LLM and the callback handler instance.
llm = OpenAI(temperature=0)
callback_handler = SingleCallbackHandler()

# Chain with the callback registered at construction time.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[callback_handler])

# Trigger the chain callbacks with a Chinese question.
response = chain.run({"question": "中国的首都是哪里？"})
print("链的最终输出:", response)


-------------------------------------------------------------------------------------------------------------


from langchain.callbacks.base import BaseCallbackHandler

# 回调1：记录链的执行进度
class ProgressLoggerCallbackHandler(BaseCallbackHandler):
    """Callback #1: reports chain progress (start and end) on stdout."""

    def on_chain_start(self, serialized, inputs, **kwargs):
        """Announce that the chain has started, echoing its inputs."""
        prefix = "进度日志 - 链开始。输入内容:"
        print(prefix, inputs)

    def on_chain_end(self, outputs, **kwargs):
        """Announce that the chain has finished, echoing its outputs."""
        prefix = "进度日志 - 链结束。输出内容:"
        print(prefix, outputs)

# 回调2：记录生成的内容
class ContentLoggerCallbackHandler(BaseCallbackHandler):
    """Callback #2: records LLM-level content (prompts, tokens, generations)."""

    @staticmethod
    def _emit(prefix, payload):
        # Single funnel for all content-log output.
        print(prefix, payload)

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Record the prompts sent to the LLM."""
        self._emit("内容日志 - LLM任务开始。提示内容:", prompts)

    def on_llm_new_token(self, token, **kwargs):
        """Record each streamed token (needs a streaming-enabled LLM)."""
        self._emit("内容日志 - 生成新Token:", token)

    def on_llm_end(self, response, **kwargs):
        """Record the finished generations."""
        self._emit("内容日志 - LLM任务结束。生成的内容:", response.generations)


from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Prompt template for Q&A.
template = "回答以下问题：{question}"
prompt = PromptTemplate(input_variables=["question"], template=template)

# Deterministic language model.
llm = OpenAI(temperature=0)

# One handler per concern.
progress_logger = ProgressLoggerCallbackHandler()
content_logger = ContentLoggerCallbackHandler()

# Chain carrying multiple callbacks at once; every event is fanned out
# to each registered handler.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[progress_logger, content_logger])

# Exercise the chain with a Chinese question.
response = chain.run({"question": "中国的国花是什么？"})
print("链的最终输出:", response)


from langchain.callbacks.base import BaseCallbackHandler

# 回调1：进度监控回调
class ProgressMonitorCallbackHandler(BaseCallbackHandler):
    """Callback #1: monitors overall chain progress."""

    def on_chain_start(self, serialized, inputs, **kwargs):
        """Report that the chain has begun, with its inputs."""
        banner = "进度监控 - 链开始。输入内容:"
        print(banner, inputs)

    def on_chain_end(self, outputs, **kwargs):
        """Report that the chain has completed, with its outputs."""
        banner = "进度监控 - 链结束。输出内容:"
        print(banner, outputs)

# 回调2：内容记录回调
class ContentLoggerCallbackHandler(BaseCallbackHandler):
    """Callback #2: records LLM prompts, streamed tokens and generations.

    NOTE: this redefines the class of the same name defined earlier in the
    file; from this point on only this definition is visible.
    """

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Record the prompts handed to the LLM."""
        tag = "内容记录 - LLM任务开始。提示内容:"
        print(tag, prompts)

    def on_llm_new_token(self, token, **kwargs):
        """Record each streamed token (needs a streaming-enabled LLM)."""
        tag = "内容记录 - 生成新Token:"
        print(tag, token)

    def on_llm_end(self, response, **kwargs):
        """Record the finished generations."""
        tag = "内容记录 - LLM任务结束。生成的内容:"
        print(tag, response.generations)


from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Prompt template asking for a detailed account of a major event in
# ancient Chinese history.
template = "请详细描述中国古代历史中的重大事件：{event}"
prompt = PromptTemplate(input_variables=["event"], template=template)

# Deterministic language model.
llm = OpenAI(temperature=0)

# One handler per concern: progress monitoring and content recording.
progress_monitor = ProgressMonitorCallbackHandler()
content_logger = ContentLoggerCallbackHandler()

# Chain that fans events out to both callbacks.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[progress_monitor, content_logger])

# Run the chain on a long-form Chinese prompt.
response = chain.run({"event": "从秦朝到汉朝的统一历程"})
print("链的最终输出:", response)


-------------------------------------------------------------------------------------------------------------


import os

# Enable LangChain tracing before any chain code runs. (The original
# imported `os` twice and set this environment variable twice; once is
# enough.)
os.environ["LANGCHAIN_TRACING"] = "true"

from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain

# Prompt template for question answering.
template = "回答以下问题：{question}"
prompt = PromptTemplate(template=template, input_variables=["question"])

# Deterministic language model.
llm = OpenAI(temperature=0)

# Build the task chain.
chain = LLMChain(llm=llm, prompt=prompt)

# Run the chain inside the OpenAI callback context to capture usage data.
with get_openai_callback() as cb:
    response = chain.run({"question": "中国的首都是哪里？"})
    print("任务链最终输出:", response)
    # Report the token usage and cost collected by the callback.
    print("Tokens消耗:", cb.total_tokens)
    print("花费:", cb.total_cost)


-------------------------------------------------------------------------------------------------------------


import os
os.environ["LANGCHAIN_TRACING"] = "true"
from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain, SimpleSequentialChain

# First prompt: produce the background for a topic.
template1 = "简要介绍{topic}的背景。"
prompt1 = PromptTemplate(template=template1, input_variables=["topic"])

# Second prompt. SimpleSequentialChain feeds each step exactly ONE input
# (the previous step's output), so this template must take a single
# variable — the original two-variable template ("background" + "topic")
# makes SimpleSequentialChain raise at construction time.
template2 = "{background}。请进一步描述其中的关键事件。"
prompt2 = PromptTemplate(template=template2, input_variables=["background"])

# Deterministic OpenAI model shared by both steps.
llm = OpenAI(temperature=0)

# Step 1: background introduction.
chain1 = LLMChain(llm=llm, prompt=prompt1)

# Step 2: key-event description.
chain2 = LLMChain(llm=llm, prompt=prompt2)

# Compose the two steps into one sequential pipeline.
sequential_chain = SimpleSequentialChain(chains=[chain1, chain2])

# Monitor token usage and cost while the pipeline runs.
with get_openai_callback() as cb:
    # SimpleSequentialChain takes a single string input (its input key is
    # "input"), not a {"topic": ...} mapping.
    response = sequential_chain.run("秦始皇的统一过程")
    print("任务链最终输出:", response)

    # OpenAICallbackHandler exposes token/cost counters only; the original
    # `cb.durations` / `cb.token_usages` attributes do not exist and would
    # raise AttributeError.
    print("任务链消耗的总Tokens:", cb.total_tokens)
    print("任务链的总花费:", cb.total_cost)
    print("成功请求次数:", cb.successful_requests)
    print("Prompt/Completion Tokens:", cb.prompt_tokens, cb.completion_tokens)


-------------------------------------------------------------------------------------------------------------


from loguru import logger
from langchain.callbacks import FileCallbackHandler
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
import os

# Path of the log file that will receive the chain output.
logfile = "output.log"

# Route loguru output to the same file (enqueue=True makes writes
# thread/process safe).
logger.add(logfile, colorize=True, enqueue=True)

# File-based callback: mirrors the chain's verbose output into the log file.
file_callback_handler = FileCallbackHandler(logfile)

# Prompt template asking for a brief account of an event.
template = "简述以下事件：{event}"
prompt = PromptTemplate(input_variables=["event"], template=template)

# Deterministic language model.
llm = OpenAI(temperature=0)

# Verbose chain with the file callback attached.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[file_callback_handler], verbose=True)

# Run the chain on a Chinese event description.
response = chain.run({"event": "中国改革开放的历程"})
print("链的最终输出:", response)


-------------------------------------------------------------------------------------------------------------


from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain

# Prompt template for summarising a historical event.
template = "请简述以下历史事件：{event}"
prompt = PromptTemplate(input_variables=["event"], template=template)

# Deterministic language model and the chain built on it.
llm = OpenAI(temperature=0)
chain = LLMChain(llm=llm, prompt=prompt)

# Track token consumption while the chain runs.
with get_openai_callback() as cb:
    response = chain.run({"event": "丝绸之路的历史发展"})
    print("链的最终输出:", response)

    # Token-counter statistics collected by the callback.
    print("总Token消耗:", cb.total_tokens)
    print("Prompt Tokens:", cb.prompt_tokens)
    print("Completion Tokens:", cb.completion_tokens)
    print("总花费:", cb.total_cost)


-------------------------------------------------------------------------------------------------------------


import os

# The Argilla callback reads its credentials from the environment
# (ARGILLA_API_KEY, and typically ARGILLA_API_URL as well).
os.environ["ARGILLA_API_KEY"] = "你的_API_密钥"


# ArgillaCallbackHandler is exported by LangChain's callbacks module; the
# `argilla` client package itself does not provide it.
from langchain.callbacks import ArgillaCallbackHandler
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# The handler requires the name of the Argilla dataset that will collect
# the prompt/response records — constructing it with no arguments raises.
# Replace "langchain-dataset" with your own dataset name.
argilla_callback_handler = ArgillaCallbackHandler(dataset_name="langchain-dataset")

# Prompt template describing a historical event.
template = "描述以下历史事件：{event}"
prompt = PromptTemplate(template=template, input_variables=["event"])

# Deterministic language model.
llm = OpenAI(temperature=0)

# Chain with the Argilla callback so runs are logged to the dataset.
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[argilla_callback_handler]
)


# Chinese test input.
response = chain.run({"event": "秦始皇统一六国的过程"})
print("任务链输出:", response)


-------------------------------------------------------------------------------------------------------------


# ArgillaCallbackHandler is exported by LangChain's callbacks module; the
# `argilla` client package itself does not provide it.
from langchain.callbacks import ArgillaCallbackHandler
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# The constructor keyword is `dataset_name`; the original `dataset=`
# keyword would raise a TypeError.
argilla_callback_handler = ArgillaCallbackHandler(dataset_name="langchain_data")

# Chain that logs its runs to the Argilla dataset.
template = "请简述以下人物的生平：{person}"
prompt = PromptTemplate(template=template, input_variables=["person"])
llm = OpenAI(temperature=0)
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[argilla_callback_handler])

# Chinese test case.
response = chain.run({"person": "屈原"})
print("任务链输出:", response)
