Abhay1210 committed on
Commit
52a714f
1 Parent(s): 6511811

Create prompt-model.py

Browse files
Files changed (1) hide show
  1. prompt-model.py +44 -0
prompt-model.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Dependencies — install from a shell, NOT as a Python statement (the original
# had a bare `pip -q install ...` line here, which is a SyntaxError in a .py file):
#   pip -q install langchain huggingface_hub transformers sentence_transformers accelerate bitsandbytes

import os

# SECURITY FIX: the original hard-coded a live HuggingFace API token here and
# committed it to the repo. Never embed secrets in source; the leaked token
# should be revoked. Read the token from the environment instead.
if not os.environ.get('HUGGINGFACEHUB_API_TOKEN'):
    print("WARNING: HUGGINGFACEHUB_API_TOKEN is not set; "
          "HuggingFaceHub API calls below will fail to authenticate.")
from langchain import PromptTemplate, HuggingFaceHub, LLMChain

# Chain-of-thought style prompt: the trailing sentence nudges the model
# to reason step by step before answering.
template = """Question: {question}

Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["question"])

# Remote LLM backed by the HuggingFace Hub inference API (flan-t5-xl).
# Deterministic decoding (temperature 0), answers capped at 64 tokens.
hub_llm = HuggingFaceHub(
    repo_id="google/flan-t5-xl",
    model_kwargs={"temperature": 0, "max_length": 64},
)

llm_chain = LLMChain(prompt=prompt, llm=hub_llm)
from langchain.llms import HuggingFacePipeline
import torch  # kept: transformers uses torch as its model backend
# FIX: the original import line listed AutoTokenizer and AutoModelForSeq2SeqLM
# twice each and imported AutoModelForCausalLM, which is never used.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Seq2seq model fine-tuned to generate ChatGPT/GPT-4 style prompts.
model_id = 'Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum'

tokenizer = AutoTokenizer.from_pretrained(model_id)
# from_tf=True: this checkpoint is published as TensorFlow weights, so
# TensorFlow must be installed for the on-the-fly conversion to run.
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, from_tf=True)

# BUGFIX: the original assigned this instance to the name `pipeline`,
# shadowing the `transformers.pipeline` factory imported above.
hf_pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=128,
)

local_llm = HuggingFacePipeline(pipeline=hf_pipe)

# Rebind llm_chain so the prompt now runs against the local pipeline
# (this replaces the remote HuggingFaceHub chain built earlier).
llm_chain = LLMChain(prompt=prompt, llm=local_llm)

question = "Excel Sheet"

print(llm_chain.run(question))