Arturo Jiménez de los Galanes Reguillos committed on
Commit a22e0d4 (1 parent: 3b47068)

Add generation code

Files changed (1)
  1. app.py +46 -3
app.py CHANGED
@@ -2,13 +2,56 @@ import gradio as gr
 import os
 from huggingface_hub import login
 from dotenv import load_dotenv
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer
+import torch
+from threading import Thread
 
 load_dotenv()
 hf_token = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')
 login(hf_token, add_to_git_credential=True)
 
-def greet(name):
-    return "Hello " + name + "!!"
+MODEL = "m-a-p/OpenCodeInterpreter-DS-33B"
+
+system_message = "You are a computer programmer that can translate python code to C++ in order to improve performance"
+
+def user_prompt_for(python):
+    return f"Rewrite this python code to C++. You must search for the maximum performance. \
+Format your response in Markdown. This is the Code: \
+\n\n\
+{python}"
+
+def messages_for(python):
+    return [
+        {"role": "system", "content": system_message},
+        {"role": "user", "content": user_prompt_for(python)}
+    ]
+
+quant_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_quant_type="nf4"
+)
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL)
+tokenizer.pad_token = tokenizer.eos_token
+streamer = TextIteratorStreamer(tokenizer)
+
+model = AutoModelForCausalLM.from_pretrained(MODEL, device_map="auto", quantization_config=quant_config)
+
+cplusplus = None
+def translate(python):
+    inputs = tokenizer.apply_chat_template(messages_for(python), return_tensors="pt").to("cuda")
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=80)
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    cplusplus = ""
+    for chunk in streamer:
+        cplusplus += chunk
+        yield cplusplus
+
+    del inputs
+    torch.cuda.empty_cache()
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo = gr.Interface(fn=translate, inputs="code", outputs="markdown")
 demo.launch()
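
A note on the threaded streaming setup in translate: depending on the installed transformers version, tokenizer.apply_chat_template(..., return_tensors="pt") may return a bare tensor of token ids rather than a dict unless return_dict=True is passed, in which case dict(inputs, ...) would fail at runtime; the module-level TextIteratorStreamer is also shared across requests, and max_new_tokens=80 will cut most translations short. Below is a minimal sketch of the same pattern with those points addressed. It reuses the MODEL, tokenizer, model and messages_for names from the commit above; return_dict=True, add_generation_prompt=True, the per-call streamer and the 1024-token budget are assumptions on my part, not part of the committed code.

# Sketch only: reuses tokenizer, model and messages_for from app.py above.
# return_dict=True, add_generation_prompt=True, the per-call streamer and
# max_new_tokens=1024 are assumptions, not part of the committed code.
from threading import Thread
from transformers import TextIteratorStreamer

def translate(python):
    # Tokenize the chat prompt; return_dict=True yields input_ids and attention_mask
    inputs = tokenizer.apply_chat_template(
        messages_for(python),
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
    ).to("cuda")
    # One streamer per request so concurrent calls do not interleave their output
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    cplusplus = ""
    for chunk in streamer:
        cplusplus += chunk
        yield cplusplus  # Gradio re-renders the markdown output on each yield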