zirui3 commited on
Commit
cda64a1
1 Parent(s): e6e9cb3

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -52
app.py DELETED
@@ -1,52 +0,0 @@
1
-
2
- import gradio as gr
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- import torch
5
-
6
# Model repo on the Hugging Face Hub.
model_name = "zirui3/gpt_1.4B_oa_instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Place every GPT-NeoX sub-module on GPU 0 so the 8-bit, fp16 load keeps
# the whole model on a single device.
chip_map = {
    'gpt_neox.embed_in': 0,
    'gpt_neox.layers': 0,
    'gpt_neox.final_layer_norm': 0,
    'embed_out': 0,
}
# BUG FIX: the original passed the undefined identifier `name` here, which
# raises NameError at import time; the intended variable is `model_name`.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=chip_map, torch_dtype=torch.float16, load_in_8bit=True)
# model = AutoModelForCausalLM.from_pretrained(model_name)
17
-
18
-
19
def predict(input, history=None, MAX_NEW_TOKENS=500):
    """Generate one chatbot reply and return an updated transcript.

    Args:
        input: The user's new message. (Name kept for caller compatibility,
            although it shadows the ``input`` builtin.)
        history: Token-id history of the conversation so far; ``None`` or an
            empty list starts a fresh conversation. BUG FIX: the original
            used a mutable default (``history=[]``) that is shared across
            calls; replaced with the ``None`` sentinel idiom, preserving the
            empty-history behavior.
        MAX_NEW_TOKENS: ``max_length`` bound passed to ``model.generate``.

    Returns:
        A ``(response_pairs, history)`` tuple: ``response_pairs`` is a list
        of ``(user, bot)`` string pairs for ``gr.Chatbot``; ``history`` is
        the updated token-id list (``generated_ids.tolist()``).
    """
    if history is None:
        history = []
    text = "User: " + input + "\n\nChip: "
    new_user_input_ids = tokenizer(text, return_tensors="pt").input_ids
    # Append the new prompt tokens to the accumulated conversation tokens.
    # NOTE(review): runs on CPU here; the .to("cuda") variant was commented
    # out in the original.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    generated_ids = model.generate(
        bot_input_ids,
        max_length=MAX_NEW_TOKENS,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        top_p=0.95,
        temperature=0.5,
        penalty_alpha=0.6,
        top_k=4,
        repetition_penalty=1.03,
        num_return_sequences=1,
    )

    response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    history = generated_ids.tolist()

    # Split the decoded transcript on blank lines and pair consecutive
    # chunks as (user, bot) turns for the Chatbot widget.
    response = response.split("\n\n")
    response_pairs = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response_pairs, history
38
-
39
-
40
# Wire up the Gradio chat UI: a transcript view, hidden per-session
# conversation state, and a textbox that submits on Enter.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    # Holds the token-id history that predict() threads through each turn.
    state = gr.State([])

    with gr.Row():
        # .style(container=False) is the gradio 3.x styling API — presumably
        # this file targets gradio 3.x; verify before upgrading gradio.
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)

    # On Enter: predict(txt, state) -> updates (chatbot, state).
    txt.submit(predict, [txt, state], [chatbot, state])
48
-
49
-
50
if __name__ == "__main__":
    # A fixed-host/port launch was used previously; kept for reference.
    # demo.launch(debug=True, server_name="0.0.0.0", server_port=9991)
    demo.launch()