AjithBharadwaj committed
Commit
1633ccf
1 Parent(s): 776ea7e

Update main.py

Files changed (1)
  1. main.py +16 -5
main.py CHANGED
@@ -5,11 +5,22 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
  from langchain_core.prompts import PromptTemplate
 
  quants = BitsAndBytesConfig(load_in_4bit=True)
- model_id = "mistralai/Mistral-7B-Instruct-v0.2"
- tokenizer = AutoTokenizer.from_pretrained(model_id, quantization_config=quants)
- model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quants)
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1000)
- hf = HuggingFacePipeline(pipeline=pipe)
+ # model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+
+ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
+
+ hf = HuggingFacePipeline.from_model_id(
+     model_id="mistralai/Mistral-7B-Instruct-v0.2",
+     task="text-generation",
+     pipeline_kwargs={"max_new_tokens": 10000},
+     model_kwargs={"quantization_config": quants},
+ )
+
+
+ # tokenizer = AutoTokenizer.from_pretrained(model_id, quantization_config=quants)
+ # model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quants)
+ # pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1000)
+ # hf = HuggingFacePipeline(pipeline=pipe)
 
  def generate_blog(role, words, topic):
      template = ''' You are an expert Blog generator , Given the Topic , the intended audience and the maximum number of words ,
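
For context, here is a minimal sketch of how the refactored hf pipeline would typically be wired into generate_blog with LangChain. The chain composition and the prompt text below are assumptions, since the diff truncates the function body; only the from_model_id call comes from the commit itself.

# Usage sketch (not part of the commit). The prompt text is a hypothetical
# stand-in for the truncated template string in main.py.
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from transformers import BitsAndBytesConfig

# Same 4-bit quantization setup the commit keeps at the top of main.py.
quants = BitsAndBytesConfig(load_in_4bit=True)

# One call replaces the old tokenizer/model/pipeline boilerplate; the
# quantization config is forwarded to the model via model_kwargs.
hf = HuggingFacePipeline.from_model_id(
    model_id="mistralai/Mistral-7B-Instruct-v0.2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 10000},
    model_kwargs={"quantization_config": quants},
)

def generate_blog(role, words, topic):
    # Hypothetical prompt body; the real template is cut off in the diff.
    prompt = PromptTemplate.from_template(
        "You are an expert blog generator. Given the topic, the intended "
        "audience and the maximum number of words, write a blog post.\n"
        "Topic: {topic}\nAudience: {role}\nMaximum words: {words}\nBlog:"
    )
    chain = prompt | hf  # LCEL: format the prompt, then run the local pipeline
    return chain.invoke({"role": role, "words": words, "topic": topic})

One practical note on the change: from_model_id collapses the separate AutoTokenizer, AutoModelForCausalLM, and pipeline() setup into a single call, which is why the old five-line block could be commented out rather than rewritten.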