AjithBharadwaj committed on
Commit
9f6b798
1 Parent(s): 559c0e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -9
app.py CHANGED
@@ -43,16 +43,19 @@ def main():
43
  word_count = st.sidebar.slider("Number of Words", min_value=50, max_value=1000, value=200, step=50)
44
 
45
  if st.sidebar.button("Generate Blog"):
46
- model_id = "google/gemma-1.1-7b-it"
47
- tokenizer = AutoTokenizer.from_pretrained(model_id,token =HF_TOKEN )
48
- model = AutoModelForCausalLM.from_pretrained(model_id,token =HF_TOKEN )
49
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,max_new_tokens=1000)
50
- hf = HuggingFacePipeline(pipeline=pipe)
51
- chain = LLMChain(llm=hf,prompt=prompt,verbose=True)
52
- aa = chain.invoke({"topic": topic,"words":word_count,"role":role})
 
 
 
53
 
54
- st.write(aa)
55
- st.write("Will Come here")
56
 
57
  if __name__ == "__main__":
58
  main()
 
43
  word_count = st.sidebar.slider("Number of Words", min_value=50, max_value=1000, value=200, step=50)
44
 
45
  if st.sidebar.button("Generate Blog"):
46
+ repo_id = "google/gemma-1.1-7b-it"
47
+
48
+ llm = HuggingFaceEndpoint(
49
+ repo_id=repo_id, max_length=128, temperature=0.5, huggingfacehub_api_token=HF_TOKEN
50
+ )
51
+ # llm_chain = LLMChain(prompt=prompt, llm=llm)
52
+ # print(llm_chain.run(question))
53
+ aa = llm_chain.run({"topic": topic,"words":word_count,"role":role})
54
+ st.write(aa)
55
+
56
 
57
+ # st.write(aa)
58
+ # st.write("Will Come here")
59
 
60
  if __name__ == "__main__":
61
  main()