sudiptaverse committed on
Commit
b1f94fe
·
verified ·
1 Parent(s): 7cbf333

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
2
+ from langchain_core.prompts import ChatPromptTemplate
3
+ from langchain.prompts import PromptTemplate
4
+ from langchain_core.output_parsers import StrOutputParser
5
+ from langchain.memory import ConversationSummaryMemory
6
+ from langchain_huggingface import HuggingFacePipeline
7
+ from langchain_core.runnables import RunnableSequence
8
+ import gradio as gr
9
+
10
# ---------------------------------------------------------------------------
# Model setup: load the Gemma 2B base model and build a Hugging Face
# text-generation pipeline around it. Runs once at import time.
# ---------------------------------------------------------------------------
model_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Text generation pipeline
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,   # cap on generated tokens per request
    do_sample=True,
    temperature=0.7,
    # Fix: by default the pipeline returns the prompt concatenated with the
    # generation, so the Gradio answer would start with the whole formatted
    # prompt. Return only the newly generated text.
    return_full_text=False,
)
24
+
25
+
26
+
27
# ---------------------------------------------------------------------------
# LCEL chain: prompt formatting -> local HF model -> plain-string output.
# ---------------------------------------------------------------------------

# Wrap the HF pipeline so it can participate in a LangChain runnable chain.
llm = HuggingFacePipeline(pipeline=generator)

# Single system message carrying the code snippet to be explained.
# NOTE(review): gemma-2b is a base (non-chat) model, so this chat template is
# rendered to plain text before generation — confirm the formatting is desired.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. Explain the following code clearly:\n\n{code}",
        ),
    ]
)

# Parse the model output down to a bare string for the UI.
parser = StrOutputParser()
chain = prompt | llm | parser
37
+
38
+
39
# ---------------------------------------------------------------------------
# Gradio UI: one text box in, one explanation text box out.
# ---------------------------------------------------------------------------
def generate_answer(input_code):
    """Run the explanation chain on *input_code* and return the model's text."""
    return chain.invoke({"code": input_code})


demo = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="Gemma 2B Code Explainer",
)
demo.launch()