Ozaii committed
Commit ecb014e
1 Parent(s): 248c919

Create app.py

Files changed (1)
  1. app.py +79 -0
app.py ADDED
@@ -0,0 +1,79 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+ import spaces
+
+ # Load the model and tokenizer from Hugging Face
+ model_path = "Ozaii/W.AI-13B-Chat"  # Replace with your username and repository name
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = AutoModelForCausalLM.from_pretrained(model_path)
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+ model.eval()
+
+ @spaces.GPU
+ def generate_response(user_input, chat_history):
+     max_context_length = 750
+     max_response_length = 150
+
+     # Rebuild the running conversation as a plain-text prompt
+     prompt = ""
+     for message in chat_history:
+         if message[0] is not None:
+             prompt += f"User: {message[0]}\n"
+         if message[1] is not None:
+             prompt += f"Assistant: {message[1]}\n"
+     prompt += f"***Use natural conversation style of assistant, you can use +18 swear words and use irregular language similar to assistant.***User: {user_input}\nAssistant:"
+
+     # Truncate from the left so only the most recent context is kept
+     prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False)
+     if len(prompt_tokens) > max_context_length:
+         prompt_tokens = prompt_tokens[-max_context_length:]
+         prompt = tokenizer.decode(prompt_tokens, clean_up_tokenization_spaces=True)
+
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     with torch.no_grad():
+         outputs = model.generate(
+             inputs.input_ids,
+             attention_mask=inputs.attention_mask,
+             max_length=len(inputs.input_ids[0]) + max_response_length,  # limit combined context and response length
+             min_length=45,
+             do_sample=True,  # required for temperature/top_k/top_p to take effect
+             temperature=0.7,  # slightly higher temperature for more diverse responses
+             top_k=30,
+             top_p=0.9,  # allow a bit more randomness
+             repetition_penalty=1.1,  # mild repetition penalty
+             no_repeat_ngram_size=3,  # avoid repeated phrases
+             eos_token_id=tokenizer.eos_token_id,
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     # Keep only the newly generated assistant turn
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     assistant_response = response.split("Assistant:")[-1].strip()
+     assistant_response = assistant_response.split('\n')[0].strip()
+     chat_history.append((user_input, assistant_response))
+     return chat_history, chat_history
+
+ def restart_chat():
+     return [], []
+
+ with gr.Blocks() as chat_interface:
+     gr.Markdown("<h1><center>W.AI Chat Nikker xD</center></h1>")
+     chat_history = gr.State([])
+     with gr.Column():
+         chatbox = gr.Chatbot()
+         with gr.Row():
+             user_input = gr.Textbox(show_label=False, placeholder="Summon Wali Here...")
+             submit_button = gr.Button("Send")
+         restart_button = gr.Button("Restart")
+
+     submit_button.click(
+         generate_response,
+         inputs=[user_input, chat_history],
+         outputs=[chatbox, chat_history]
+     )
+
+     restart_button.click(
+         restart_chat,
+         inputs=[],
+         outputs=[chatbox, chat_history]
+     )
+
+ chat_interface.launch()
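
For reference, a minimal sketch of exercising the handler outside the Gradio UI. This is not part of the commit; it assumes the module-level model/tokenizer loading in app.py has already run, that the `spaces` package is installed, and that `@spaces.GPU` degrades to a no-op off-Space (all assumptions), and the sample prompt and `history` name are purely illustrative:

```python
# Hypothetical smoke test: drive generate_response directly, no UI.
history = []
history, _ = generate_response("hello there", history)  # appends (user, reply) and returns it twice
print(history[-1][1])  # the assistant's reply to the last user turn
```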