AjithBharadwaj committed on
Commit
20f9f0c
1 Parent(s): ae08c02

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +31 -0
main.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline,BitsAndBytesConfig
3
+ import accelerate
4
+ import bitsandbytes
5
+ from langchain_core.prompts import PromptTemplate
6
+
7
# --- Model setup (module import side effect: downloads ~7B weights on first run) ---

# Load the model in 4-bit to fit modest GPU memory; relies on the
# `bitsandbytes` / `accelerate` packages imported above.
quants = BitsAndBytesConfig(load_in_4bit=True)
model_id = "mistralai/Mistral-7B-Instruct-v0.2"

# NOTE(fix): `quantization_config` is a *model* kwarg, not a tokenizer kwarg —
# passing it to AutoTokenizer.from_pretrained was meaningless, so it is dropped.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quants)

# Wrap the HF text-generation pipeline so it can be composed as a LangChain LLM.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
hf = HuggingFacePipeline(pipeline=pipe)
13
+
14
def generate_blog(role, words, topic):
    """Generate a blog post in markdown using the Mistral-7B pipeline.

    Parameters
    ----------
    role : str
        Intended audience of the blog post.
    words : int | str
        Maximum number of words requested for the post.
    topic : str
        Subject to write about.

    Returns
    -------
    str
        Raw model output (the prompt asks for markdown format).
    """
    # NOTE(fix): corrected the prompt typo "Audince" -> "Audience"; a misspelled
    # field label in the prompt can degrade how the model interprets it.
    template = ''' You are an expert Blog generator , Given the Topic , the intended audience and the maximum number of words ,
    Write a blog on the given topic

    Topic : {topic}
    Intended Audience : {role}
    Number of Words : {words}

    Strictly return the output in a markdown format'''

    prompt = PromptTemplate.from_template(template)

    # LCEL composition: formatted prompt is piped into the module-level LLM wrapper.
    chain = prompt | hf

    return chain.invoke({"topic": topic, "words": words, "role": role})