import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Set environment variables to handle potential CUDA memory issues
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'

# Initialize model and tokenizer
model_name = "deepseek-ai/DeepSeek-V3-0324"
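# NOTE: DeepSeek-V3-0324 is a very large mixture-of-experts model; with
# device_map="auto" the weights are sharded across every visible GPU, so
# running this script as written assumes a machine with substantial GPU memory.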

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

print("Loading model...")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
    # Add low_cpu_mem_usage for better memory management
    low_cpu_mem_usage=True,
    # Use FlashAttention 2 for faster attention; requires the flash-attn package
    # (attn_implementation supersedes the deprecated use_flash_attention_2 flag)
    attn_implementation="flash_attention_2",
    use_cache=True
)

# Set model to evaluation mode
model.eval()


def generate_response(message, chat_history, system_prompt="You are a helpful AI assistant.", max_new_tokens=2048,
                      temperature=0.7):
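    """Generate one assistant reply to `message`, conditioned on prior turns.

    `chat_history` is a list of (user, assistant) pairs as kept by the Gradio
    Chatbot component; the reply is decoded from the newly generated tokens only.
    """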
    try:
        # Format the conversation: system prompt, prior turns, then the new message
        history_text = ""
        for user_msg, assistant_msg in chat_history or []:
            history_text += f"User: {user_msg}\nAssistant: {assistant_msg}\n"
        full_prompt = f"{system_prompt}\n\n{history_text}User: {message}\nAssistant:"
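        # Note: if the tokenizer ships a chat template (DeepSeek's releases
        # usually do), tokenizer.apply_chat_template(messages, tokenize=False,
        # add_generation_prompt=True) could replace this manual formatting.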

        # Tokenize input
        inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)

        # Generate response
        with torch.no_grad():
            outputs = model.generate(
                **inputs,  # pass input_ids and attention_mask together
                max_new_tokens=max_new_tokens,  # cap newly generated tokens, not total length
                temperature=temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                top_p=0.9,
                repetition_penalty=1.1,
                # Generation optimization parameters
                use_cache=True,
                num_beams=1,
            )

        # Decode only the newly generated tokens (the prompt is echoed in the output)
        new_tokens = outputs[0][inputs.input_ids.shape[1]:]
        response = tokenizer.decode(new_tokens, skip_special_tokens=True)
        # Trim anything the model generated past its own turn
        response = response.split("User:")[0].strip()

        return response
    except Exception as e:
        return f"An error occurred: {str(e)}"


# Create the Gradio interface
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown("# DeepSeek V3 Chatbot")
    gr.Markdown("Welcome! This is a chatbot powered by the DeepSeek-V3-0324 model.")

    chatbot = gr.Chatbot(height=600)
    msg = gr.Textbox(label="Message", placeholder="Type your message here...")
    clear = gr.Button("Clear Conversation")

    # Add temperature control
    temperature = gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.7,
        step=0.1,
        label="Temperature",
        info="Higher = more creative, Lower = more focused"
    )


    def user(user_message, history):
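        """Append the new user message to the history and clear the textbox."""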
        return "", history + [[user_message, None]]


    def bot(history, temp):
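        """Generate the assistant reply for the most recent user message."""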
        user_message = history[-1][0]
        bot_message = generate_response(user_message, history[:-1], temperature=temp)
        history[-1][1] = bot_message
        return history


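    # Submitting a message first records it in the history (user), then
    # generates the model's reply at the selected temperature (bot).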
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, temperature], chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

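# Enable request queueing so long generations do not time out the interface.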
demo.queue()
demo.launch()