sudiptaverse committed on
Commit
5a357ab
·
verified ·
1 Parent(s): 5e78d17

Upload DevDeCode.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. DevDeCode.py +43 -0
DevDeCode.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
2
+ from langchain_core.prompts import ChatPromptTemplate
3
+ from langchain.prompts import PromptTemplate
4
+ from langchain_core.output_parsers import StrOutputParser
5
+ from langchain_huggingface import HuggingFacePipeline
6
+ from langchain_core.runnables import RunnableSequence
7
+ import gradio as gr
8
+
9
# ---- Model setup ----
# Load the Gemma 2B base model and its tokenizer from the Hugging Face Hub.
# NOTE(review): this downloads several GB of weights on first run and assumes
# the environment has access (and a license acceptance) for google/gemma-2b.
model_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Text-generation pipeline used as the LLM backend for the LangChain chain.
# return_full_text=False makes the pipeline return only the newly generated
# tokens; by default a "text-generation" pipeline echoes the prompt back, so
# the Gradio response would have included the whole prompt template before
# the actual explanation.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,   # cap on generated tokens per request
    do_sample=True,       # sampling (not greedy) for more varied explanations
    temperature=0.7,
    return_full_text=False,
)
23
+
24
+
25
+
26
# ---- LangChain wiring ----
# Adapt the raw transformers pipeline so it can act as a LangChain LLM.
llm = HuggingFacePipeline(pipeline=generator)

# One system message carrying the instruction; {code} is substituted when the
# chain is invoked with {"code": ...}.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant. Explain the following code clearly:\n\n{code}")
])

# Explicit RunnableSequence — equivalent to `prompt | llm | StrOutputParser()`:
# format the prompt, run the model, then coerce the output to a plain string.
chain = RunnableSequence(prompt, llm, StrOutputParser())
36
+
37
+
38
# Gradio callback: feed the submitted snippet through the explanation chain
# and hand the model's text straight back to the UI.
def generate_answer(input_code):
    """Return the model's plain-text explanation of *input_code*."""
    return chain.invoke({"code": input_code})
42
+
43
# Build the web UI and start serving; launch() blocks until the server stops.
demo = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="Gemma 2B Code Explainer",
)
demo.launch()