# -*- coding: utf-8 -*-

# @Project : fastapi-tutorial
# @Date    : 20240319-0407
# @Author  : robin

import os
import sys  # NOTE(review): appears unused in this file — confirm before removing
from dotenv import load_dotenv

# Load environment variables (OPENAI_API_KEY etc.) from the parent
# directory's .env file — not "openai.env" as a stale comment claimed.
load_dotenv("../.env")

# Read the OPENAI_API_KEY from the environment
# api_key = os.environ["OPENAI_API_KEY"]
# api_base = os.environ["OPENAI_API_BASE"]

# os.environ['OPENAI_API_KEY'] = "sk-***REDACTED***"  # SECURITY: never commit real API keys; load them from .env instead
# print( os.environ );

# -------------
# from langchain.chains import LLMChain
# from langchain_community.llms import OpenAI
# from langchain_core.prompts import PromptTemplate
#
# llm = OpenAI(
#     temperature=0 #, model_name = "gpt-3.5-turbo"
# )
# prompt_template = "帮我给{product}想三个可以注册的域名?"
# llm_chain = LLMChain(
#     llm=llm,
#     prompt=PromptTemplate.from_template(prompt_template),
#     verbose=True,#是否开启日志
# )
#
# r1 = llm_chain("AI研习社")
# print(r1)
# ---------------

# SimpleSequentialChain & SequentialChain

# from langchain.chains import LLMChain
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import ChatPromptTemplate
# from langchain.chains import SimpleSequentialChain
#
# chat_model = ChatOpenAI(
#     temperature=0,
#     model="gpt-3.5-turbo",
# )
#
# #chain 1
# first_prompt = ChatPromptTemplate.from_template("帮我给{product}的公司起一个响亮容易记忆的名字?")
#
# chain_one = LLMChain(
#     llm=chat_model,
#     prompt=first_prompt,
#     verbose=True,
# )
#
# #chain 2
# second_prompt = ChatPromptTemplate.from_template("用5个词来描述一下这个公司名字：{company_name}")
#
# chain_two = LLMChain(
#     llm=chat_model,
#     prompt=second_prompt,
#     verbose=True,
# )
#
# overall_simple_chain = SimpleSequentialChain(
#     chains=[chain_one, chain_two],
#     verbose=True,#打开日志
# )
#
# r = overall_simple_chain.run("AI教育培训机构")
# print(r)

# -----------------

#SequentialChain 支持多个链路的顺序执行

# from langchain.chains import LLMChain
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import ChatPromptTemplate
# from langchain.chains import SequentialChain
#
# llm = ChatOpenAI(
#     temperature=0,
#     model="gpt-3.5-turbo",
# )
#
# #chain 1 任务：翻译成中文
# first_prompt = ChatPromptTemplate.from_template("把下面内容翻译成中文:\n\n{content}")
# chain_one = LLMChain(
#     llm=llm,
#     prompt=first_prompt,
#     verbose=True,
#     output_key="Chinese_Rview",
# )
#
# #chain 2 任务：对翻译后的中文进行总结摘要 input_key是上一个chain的output_key
# second_prompt = ChatPromptTemplate.from_template("用一句话总结下面内容:\n\n{Chinese_Rview}")
# chain_two = LLMChain(
#     llm=llm,
#     prompt=second_prompt,
#     verbose=True,
#     output_key="Chinese_Summary",
# )
#
# #chain 3 任务:智能识别语言 input_key是上一个chain的output_key
# third_prompt = ChatPromptTemplate.from_template("下面内容是什么语言:\n\n{Chinese_Summary}")
# chain_three = LLMChain(
#     llm=llm,
#     prompt=third_prompt,
#     verbose=True,
#     output_key="Language",
# )
#
# #chain 4 任务:针对摘要使用指定语言进行评论 input_key是上一个chain的output_key
# fourth_prompt = ChatPromptTemplate.from_template("请使用指定的语言对以下内容进行回复:\n\n内容:{Chinese_Summary}\n\n语言:{Language}")
# chain_four = LLMChain(
#     llm=llm,
#     prompt=fourth_prompt,
#     verbose=True,
#     output_key="Reply",
# )
#
# #overall 任务：翻译成中文->对翻译后的中文进行总结摘要->智能识别语言->针对摘要使用指定语言进行评论
# overall_chain = SequentialChain(
#     chains=[chain_one, chain_two, chain_three, chain_four],
#     verbose=True,
#     input_variables=["content"],
#     output_variables=["Chinese_Rview", "Chinese_Summary", "Language", "Reply"],
# )
#
# #读取文件
# content = "Recently, we welcomed several new team members who have made significant contributions to their respective departments. " \
#           "I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently " \
#           "received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast " \
#           "approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: michael.johnson@example.com)."
# r = overall_chain(content)
# print(r)

# RouterChain
# from langchain.prompts import PromptTemplate

# #物理链
# physics_template = """您是一位非常聪明的物理教授.\n
# 您擅长以简洁易懂的方式回答物理问题.\n
# 当您不知道问题答案的时候，您会坦率承认不知道.\n
# 下面是一个问题:
# {input}"""
# physics_prompt = PromptTemplate.from_template(physics_template)
#
# #数学链
# math_template = """您是一位非常优秀的数学教授.\n
# 您擅长回答数学问题.\n
# 您之所以如此优秀，是因为您能够将困难问题分解成组成的部分，回答这些部分，然后将它们组合起来，回答更广泛的问题.\n
# 下面是一个问题:
# {input}"""
# math_prompt = PromptTemplate.from_template(math_template)
#
# nd_template = """您是一位老师，但是每次提到牛顿,你就很讨厌回答他的问题,你会拒绝回答,你只会说不知道。
# 下面是一个问题:
# {input}"""
#
# nd_prompt = PromptTemplate.from_template(nd_template)
#
# from langchain.chains import ConversationChain
# from langchain.chains import LLMChain
# from langchain.llms import OpenAI
#
#
# prompt_infos = [
#     {
#         "name":"physics",
#         "description":"擅长回答物理问题",
#         "prompt_template":physics_template,
#     },
#     {
#         "name":"math",
#         "description":"擅长回答数学问题",
#         "prompt_template":math_template,
#     },
#     {
#         "name":"physics_nd",
#         "description":"擅长回答牛顿问题",
#         "prompt_template":nd_template,
#     },
# ]
#
# llm = OpenAI(
#     temperature = 0.1
# )
# destination_chains = {}
# for p_info in prompt_infos:
#     name = p_info["name"]
#     prompt_template = p_info["prompt_template"]
#     prompt = PromptTemplate(
#         template=prompt_template,
#         input_variables=["input"]
#     )
#     chain = LLMChain(
#         llm=llm,
#         prompt=prompt,
#     )
#     destination_chains[name] = chain
#
# default_chain = ConversationChain(
#     llm = llm,
#     output_key="text"
# )
#
# print(default_chain)
#
# from langchain.chains.router.llm_router import LLMRouterChain,RouterOutputParser
# from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
# from langchain.chains.router import MultiPromptChain
#
# destinations = [f"{p['name']}:{p['description']}" for p in prompt_infos]
# destinations_str = "\n".join(destinations)
# router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations_str)
# # print(MULTI_PROMPT_ROUTER_TEMPLATE)
# print(router_template)
#
# router_prompt = PromptTemplate(
#     template=router_template,
#     input_variables=["input"],
#     output_parser=RouterOutputParser()
# )
# print(router_prompt)
#
# router_chain = LLMRouterChain.from_llm(
#     llm,
#     router_prompt
# )
#
# chain = MultiPromptChain(
#     router_chain=router_chain,
#     destination_chains=destination_chains,
#     default_chain=default_chain,
#     verbose=True
# )
#
# # r = chain.run("什么是牛顿第一定律?")
# r = chain.run("1+1等于几?")
# print(r)
#
# # 故意命中 default_chain
# t = chain.run("两个黄鹂鸣翠柳，下一句?")
# print(t)

# TransformChain demo: shorten a long document before summarizing it.
# Specify UTF-8 explicitly so the (Chinese) text decodes identically on
# every platform instead of relying on the locale-dependent default
# encoding of open().
with open("letter.txt", encoding="utf-8") as f:
    letters = f.read()

from langchain.prompts import PromptTemplate
from langchain.chains import (
    LLMChain,
    SimpleSequentialChain,
    TransformChain,
)
from langchain.llms import OpenAI

def transform_func(inputs: dict) -> dict:
    """Keep only the first three paragraphs of ``inputs["text"]``.

    Paragraphs are delimited by blank lines; the truncated text is
    returned under the ``output_text`` key for the next chain link.
    """
    paragraphs = inputs["text"].split("\n\n")
    return {"output_text": "\n\n".join(paragraphs[:3])}

# Document-transformation chain: trims the raw letter down to its first
# three paragraphs (see transform_func) before it reaches the LLM.
transform_chain = TransformChain(
    input_variables=["text"],
    output_variables=["output_text"],
    transform=transform_func,
)

# Summarization prompt; consumes the transform chain's "output_text".
template = """对下面的文字进行总结:
{output_text}

总结:"""
prompt = PromptTemplate(
    input_variables=["output_text"],
    template=template,
)
llm_chain = LLMChain(
    llm=OpenAI(),
    prompt=prompt,
)

# Wire the two steps together: truncate -> summarize.
# (fixed typo: variable was previously misspelled "squential_chain")
sequential_chain = SimpleSequentialChain(
    chains=[transform_chain, llm_chain],
    verbose=True,
)

# print(letters)
sequential_chain.run(letters)


# Five ways to invoke a chain: __call__, run(), apply(), generate(), predict().

from langchain import (
    PromptTemplate,
    OpenAI,
    LLMChain
)

prompt_template = "给做{product}的公司起一个名字?"

llm = OpenAI(
    temperature=0
)

# NOTE(review): this rebinds llm_chain / prompt_template from the
# TransformChain section above — intentional in this tutorial script.
llm_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(prompt_template),
    verbose=True
)

# 1) __call__ with a single positional input:
#llm_chain("儿童玩具")
# 2) run() with the raw input string:
#llm_chain.run("儿童玩具")
# 3) apply() over a list of input dicts (batch):
#llm_chain.apply([
#    {"product":"儿童玩具"},
#])
# 4) generate() over a list of input dicts; result object printed below:
#a = llm_chain.generate([
#    {"product":"儿童玩具"},
#])
#print(a)
# 5) predict() with keyword arguments:
llm_chain.predict(product="儿童玩具")