|
|
|
"""mohanism.195 |
|
|
|
Automatically generated by Colab. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1AvIdAQmhCWUUe6rT9sck2gBGkecNCjEc |
|
""" |
|
|
|
# NOTE: the original line was the notebook magic "!pip install dotenv", which
# is not valid Python syntax in a plain .py file. The PyPI package providing
# the `dotenv` module below is "python-dotenv" — install it with:
#   pip install python-dotenv

from dotenv import load_dotenv, find_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from the nearest .env file.
load_dotenv(find_dotenv())
|
|
|
# The module is langchain.llms ("llms", not "llns" as originally typed).
from langchain.llms import OpenAI

# Completion-style LLM wrapper; calling it with a string returns the
# completion text.
llm = OpenAI(model_name="text-davinci-003")

llm("explain large language models in one sentence")
|
|
|
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)
from langchain.chat_models import ChatOpenAI

# Chat-style model; a low temperature keeps answers focused.
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.3)

# ChatOpenAI expects a *list* of messages; the original built a tuple.
messages = [
    SystemMessage(content="You are an expert data scientist"),
    HumanMessage(content="Write a Python script that trains a neural network on simulated data "),
]

response = chat(messages)

# print() takes `end`, not `ends`.
print(response.content, end="\n")
|
|
|
from langchain import PromptTemplate

# Prompt with a single template variable, {concept}.
template = """You are an expert data scientist with an expertise in building deep learning models,
Explain the concept of {concept} in a couple of lines
"""

# The keyword is input_variables (plural); the original "input_variable"
# is an unknown argument and fails validation.
prompt = PromptTemplate(
    input_variables=["concept"],
    template=template,
)

prompt

llm(prompt.format(concept="autoencoder"))
|
|
|
from langchain.chains import LLMChain

# Fixed typos: the class is LLMChain (capital C) and the model variable
# defined earlier is `llm`, not `lln`.
chain = LLMChain(llm=llm, prompt=prompt)

second_prompt = PromptTemplate(
    input_variables=["ml_concept"],
    template="Turn the concept description of {ml_concept} and explain it to me like I'm five in 500 words",
)
chain_two = LLMChain(llm=llm, prompt=second_prompt)

# The class is SimpleSequentialChain (the original had a stray "t").
from langchain.chains import SimpleSequentialChain
overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)

# Run the two chains back-to-back: the first describes the concept, the
# second simplifies that description.
explanation = overall_chain.run("autoencoder")
print(explanation)
|
|
|
# The original was missing the space: "importRecursiveCharacterTextSplitter".
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=0,
)

# Bound to `texts` (plural) because the later embedding and Pinecone cells
# reference `texts`; the original bound it to `text`, causing a NameError
# downstream.
texts = text_splitter.create_documents([explanation])

texts[0].page_content
|
|
|
from langchain.embeddings import OpenAIEmbeddings

# Embedding wrapper backed by OpenAI's "ada" embedding model.
embeddings = OpenAIEmbeddings(model_name="ada")

# Embed the first chunk's raw text and display the resulting vector.
# NOTE(review): assumes `texts` holds the documents produced by the text
# splitter above — confirm the variable name matches that cell.
first_chunk = texts[0].page_content
query_result = embeddings.embed_query(first_chunk)
query_result
|
|
|
import os
# The package is "pinecone" (PyPI: pinecone-client); "pinecome" is a typo.
import pinecone
# LangChain's vector-store integrations live in langchain.vectorstores.
from langchain.vectorstores import Pinecone

# os.getenv is a function call (parentheses, not square brackets); the
# original also had a stray "(" after `environment`, a syntax error.
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),
    environment=os.getenv("PINECONE_ENV"),
)

index_name = "langchain-quickstart"
# The classmethod is from_documents, not form_documents.
search = Pinecone.from_documents(texts, embeddings, index_name=index_name)

query = "What is magical about an autoencoder?"
result = search.similarity_search(query)

result
|
|
|
# Fixed import path: langchain.agents.agent_toolkits (the original had
# "langhain.agent.agent_toolkets").
from langchain.agents.agent_toolkits import create_python_agent
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.llms.openai import OpenAI

# The original call had mismatched parentheses, leaving max_tokens outside
# the OpenAI(...) constructor; create_python_agent also requires a `tool`
# (the Python REPL tool imported above).
agent_executor = create_python_agent(
    llm=OpenAI(temperature=0, max_tokens=1000),
    tool=PythonREPLTool(),
    verbose=True,
)

# The quadratic in the original prompt was garbled ("3 * x==2 + 2** - 1");
# restored to a well-formed expression.
agent_executor.run("Find the roots (zeros) if the quadratic function 3 * x**2 + 2*x - 1")