# LangChain + Hugging Face demo: build a chain-of-thought prompt chain,
# first against the hosted flan-t5-xl Inference API, then against a local
# BART prompt-generator model run through a transformers pipeline.
# Prerequisites (install once, in a shell or notebook cell -- this is a shell
# command, not Python, so it cannot live as a bare statement in this file):
#   pip -q install langchain huggingface_hub transformers sentence_transformers accelerate bitsandbytes
import os

import torch
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from langchain.llms import HuggingFacePipeline
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# The Hub token must come from the environment. The original assigned an
# undefined name `prompttoken`, which raised NameError; fail fast with a
# clear message instead, and never hard-code credentials in source.
if not os.environ.get('HUGGINGFACEHUB_API_TOKEN'):
    raise EnvironmentError(
        "Set the HUGGINGFACEHUB_API_TOKEN environment variable before running."
    )

# Chain-of-thought style prompt: the trailing cue nudges the model to reason
# step by step before answering.
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# Remote chain: hosted flan-t5-xl via the HF Inference API.
# NOTE: the Inference API rejects temperature == 0 ("must be strictly
# positive"), so use a tiny positive value for near-deterministic output.
llm_chain = LLMChain(
    prompt=prompt,
    llm=HuggingFaceHub(
        repo_id="google/flan-t5-xl",
        model_kwargs={"temperature": 1e-10, "max_length": 64},
    ),
)

# Local chain: a seq2seq prompt-generator model loaded from TF weights and
# wrapped in a transformers text2text pipeline.
model_id = 'Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, from_tf=True)

# Distinct name on purpose: the original rebound `pipeline`, shadowing the
# transformers factory function it had just imported.
text2text_pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=128,
)
local_llm = HuggingFacePipeline(pipeline=text2text_pipe)

# Rebind llm_chain so the local pipeline replaces the remote one, matching
# the original script's behavior (only the local chain is actually run).
llm_chain = LLMChain(prompt=prompt, llm=local_llm)

question = "Excel Sheet"
print(llm_chain.run(question))