#!/usr/bin/env python
# coding: utf-8

# Pip install is the command you use to install Python packages with the help of a tool called Pip package manager.
# Installing LangChain package

# %% use env
from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY / OPENAI_API_BASE, per the
# commented-out assignments below) from a local .env file into os.environ
# before any API client is configured.
load_dotenv()
# In[20]:


# get_ipython().system('pip install langChain')


# # Let's use a proprietary LLM from OpenAI
#
# Installing the openai package, which includes the classes that we can use to communicate with OpenAI services

# In[21]:


# get_ipython().system('pip install Openai')


# Imports the Python built-in module called "os."
# This module provides a way to interact with the operating system,such as accessing environment variables,working with files and directories,executing shell
# commands,etc
# The environ attribute is a dictionary-like object that contains the environment variables of the current operating system session
# By accessing os.environ, you can retrieve and manipulate environment variables within your Python program. For example, you can retrieve the value of a
# specific environment variable using the syntax os.environ["VARIABLE_NAME"], where "VARIABLE_NAME" is the name of the environment variable you want to
# access.

# In[22]:


import os

import httpx

# os.environ["OPENAI_API_KEY"] = ''
# os.environ["OPENAI_API_BASE"] = ''

# LangChain has built a wrapper around the OpenAI APIs, using which we can get access to all the services OpenAI provides.
# The code snippet below imports a specific class called 'OpenAI' (a wrapper around OpenAI large language models) from the 'llms' module of the 'langchain'
# library.
# https://python.langchain.com/en/latest/modules/langchain/llms/openai.html

# In[23]:


# from langchain.llms import OpenAI
from langchain_community.llms import OpenAI

# Here we are instantiating a language model object called OpenAI, for our natural language processing tasks.
# The parameter model_name is provided with the value "text-davinci-003", which is a specific version or variant of a language model (examples: text-davinci-
# 003, code-davinci-002, gpt-3.5-turbo, text-ada-001 and more).

# In[24]:


# Shared HTTP client pointing every request at the OpenAI-compatible proxy
# endpoint instead of api.openai.com.
_proxy_client = httpx.Client(
    base_url="https://oneapi.xty.app/v1",
    follow_redirects=True,
)

# Instantiate the completion-style LLM wrapper. "text-davinci-003" no longer
# exists on this endpoint; "gpt-3.5-turbo-instruct" is the working choice.
# temperature=0.9 makes the sampling fairly creative/non-deterministic.
llm = OpenAI(
    model_name="gpt-3.5-turbo-instruct",
    temperature=0.9,
    http_client=_proxy_client,
)

# Here the language model is represented by the object "llm", which is being utilized to generate a completion or response based on a specific query.
# The query, stored in the "our_query" variable, is being passed to the model through the llm object.

# In[25]:


# Pass the query to the model through the `llm` object. `invoke` is the
# current LangChain entry point — calling the LLM directly as `llm(our_query)`
# has been deprecated (since LangChain 0.1.7) in favor of `llm.invoke(...)`.
our_query = "What is the currency of India?"
completion = llm.invoke(our_query)

# In[ ]:

print(completion)

# In[ ]:
# https://huggingface.co/docs/transformers/model_doc/flan-t5
import os

# os.environ['HUGGINGFACEHUB_API_TOKEN'] = ''
# from langchain.llms import HuggingFaceHub
# from langchain_community.llms import HuggingFaceHub

# F:\anaconda\envs\py39f\lib\site-packages\huggingface_hub\utils\_deprecation.py:131:
# FutureWarning: 'InferenceApi' (from 'huggingface_hub.inference_api') is deprecated
# and will be removed from version '1.0'.
# `InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`.
# Check out this guide to learn how to convert your script to use it:
# https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client.

# llm = HuggingFaceHub(repo_id="google/flan-t5-large")
# model = "google/flan-t5-large"
# llm = HuggingFaceHub(model=model)

# The LLM takes a prompt as an input and outputs a completion
# our_query = 'What is the currency of India?'
# completion = llm(our_query)
# print(completion)

# The flat `from langchain import PromptTemplate, HuggingFaceHub, LLMChain`
# form is deprecated/removed in recent LangChain releases. Import from the
# current module paths instead — HuggingFaceHub lives in langchain_community,
# matching the `langchain_community.llms` import used earlier in this file.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub

# Chain-of-thought style prompt: nudges the model to reason step by step
# before answering.
template = """Question: {question}

Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["question"])

llm_chain = LLMChain(
    prompt=prompt,
    llm=HuggingFaceHub(
        repo_id="google/flan-t5-large",
        # NOTE(review): some HF Inference API versions reject temperature=0
        # ("temperature must be strictly positive") — confirm against the
        # deployed huggingface_hub version, or use a small positive value.
        model_kwargs={
            "temperature": 0,
            "max_length": 64,
        },
    ),
)

question = "What is the capital of France?"

# `Chain.run` is deprecated; `invoke` returns a dict whose "text" key holds
# the LLMChain output string, preserving the original printed result.
print(llm_chain.invoke(question)["text"])