Update README.md
README.md
cvx-coder aims to improve the Matlab [CVX](https://cvxr.com/cvx) coding and QA abilities of LLMs. It is a [phi-3 model](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) finetuned on a dataset of CVX docs, code, and forum conversations.

## Quickstart

For a quick test, run the following:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
m_path="tim1900/cvx-coder"
# ... (the model, tokenizer, pipeline, generation_args, and `messages`
# setup is collapsed in this diff view; see the sketch below) ...
output = pipe(messages, **generation_args)
print(output[0]['generated_text'])
```
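The middle of the quickstart is collapsed in the diff above. A minimal runnable sketch of the whole snippet, assuming the omitted setup mirrors the Gradio example below; the question in `messages` is illustrative, not from the original README:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

m_path = "tim1900/cvx-coder"

# Load the finetuned phi-3 model and its tokenizer (assumed identical
# to the setup in the Gradio example below).
model = AutoModelForCausalLM.from_pretrained(
    m_path,
    device_map="auto",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(m_path)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

generation_args = {
    "max_new_tokens": 2000,
    "return_full_text": False,
    "temperature": 0,
    "do_sample": False,
}

# Illustrative CVX question; replace with your own.
messages = [
    {"role": "user", "content": "How do I express a least-squares problem in CVX?"},
]
output = pipe(messages, **generation_args)
print(output[0]['generated_text'])
```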
For a chat UI in the browser, run the following:

```python
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

m_path = "tim1900/cvx-coder"
model = AutoModelForCausalLM.from_pretrained(
    m_path,
    device_map="auto",       # place the model on the available device(s)
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(m_path)
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
generation_args = {
    "max_new_tokens": 2000,      # generous budget for code-heavy answers
    "return_full_text": False,   # return only the newly generated text
    "temperature": 0,
    "do_sample": False,          # greedy decoding, for reproducible answers
}

def assistant_talk(message, history):
    # Convert Gradio's history of (user, assistant) pairs into the chat
    # format the pipeline expects, then append the new user turn.
    messages = []
    for user_turn, assistant_turn in history:
        messages += [
            {"role": "user", "content": user_turn},
            {"role": "assistant", "content": assistant_turn},
        ]
    messages.append({"role": "user", "content": message})

    output = pipe(messages, **generation_args)
    return output[0]['generated_text']

gr.ChatInterface(assistant_talk).launch()
```
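`launch()` serves the chat UI locally, at http://127.0.0.1:7860 by default; pass `share=True` to get a temporary public link for testing from another machine.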