import spaces
import json
import subprocess
import os
import sys

def run_command(command):
    """Run a shell command; return its stdout on success, or None on failure."""
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, error = process.communicate()
    if process.returncode != 0:
        print(f"Error executing command: {command}")
        print(error.decode('utf-8'))
        return None
    return output.decode('utf-8')

# Download CUDA installer
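# The from-source llama-cpp-python build below needs the CUDA toolkit (nvcc), which
# the base image may not ship, so CUDA 12.2 is installed locally here.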
download_command = "wget https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
result = run_command(download_command)
if result is None:
    print("Failed to download CUDA installer.")
    sys.exit(1)

# Run CUDA installer in silent mode
install_command = "sh cuda_12.2.0_535.54.03_linux.run --silent --toolkit --samples --override"
result = run_command(install_command)
if result is None:
    print("Failed to run CUDA installer.")
    sys.exit(1)

print("CUDA installation process completed.")

def install_packages():
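    """Build and install llama-cpp-python from source against a pinned
    llama.cpp commit, then install llama-cpp-agent on top of it."""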
    
    # Clone the repository with submodules
    run_command("git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git")
    
    # Change to the cloned directory
    os.chdir("llama-cpp-python")
    
    # Checkout the specific commit in the llama.cpp submodule
    os.chdir("vendor/llama.cpp")
    run_command("git checkout 50e0535")
    os.chdir("../..")
    
    # Upgrade pip
    run_command("pip install --upgrade pip")



    # Install all optional dependencies with CUDA support
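    # CMAKE_ARGS is forwarded to the CMake build and FORCE_CMAKE=1 makes pip compile
    # from source; the flags enable the OpenBLAS backend and point CMake at the
    # CUDA 12.2 toolkit installed above. (Depending on the llama.cpp revision, GPU
    # offload may additionally require -DGGML_CUDA=ON.)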
    run_command('CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DCUDA_PATH=/usr/local/cuda-12.2 -DCUDAToolkit_ROOT=/usr/local/cuda-12.2 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.2/lib64" FORCE_CMAKE=1 pip install -e .')
    
    run_command("make clean && GGML_OPENBLAS=1 make -j")
        
    # Reinstall the package with CUDA support
    run_command('CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DCUDA_PATH=/usr/local/cuda-12.2 -DCUDAToolkit_ROOT=/usr/local/cuda-12.2 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.2/lib64" FORCE_CMAKE=1 pip install -e .')

    # Install llama-cpp-agent
    run_command("pip install llama-cpp-agent")
    
    run_command("export PYTHONPATH=$PYTHONPATH:$(pwd)")
    
    print("Installation complete!")

try:
    install_packages()
    
    # Add a delay to allow for package registration
    import time
    time.sleep(5)
    
    # Force Python to reload the site packages
    import site
    import importlib
    importlib.reload(site)
    
    # Now try to import the libraries
    from llama_cpp import Llama
    from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
    from llama_cpp_agent.providers import LlamaCppPythonProvider
    from llama_cpp_agent.chat_history import BasicChatHistory
    from llama_cpp_agent.chat_history.messages import Roles
    
    print("Libraries imported successfully!")
except Exception as e:
    print(f"Installation failed or libraries couldn't be imported: {str(e)}")
    sys.exit(1)
    
import gradio as gr
from huggingface_hub import hf_hub_download

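# Fetch the quantized GGUF weights from the Hugging Face Hub into ./models.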
hf_hub_download(
    repo_id="MaziyarPanahi/Mistral-Nemo-Instruct-2407-GGUF",
    filename="Mistral-Nemo-Instruct-2407.Q5_K_M.gguf",
    local_dir="./models"
)

# Initialize LLM outside the respond function
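# so the weights load once per process. flash_attn enables flash-attention kernels,
# n_gpu_layers=81 offloads every layer to the GPU, and n_ctx sets a 32k context window.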
llm = Llama(
    model_path="models/Mistral-Nemo-Instruct-2407.Q5_K_M.gguf",
    flash_attn=True,
    n_gpu_layers=81,
    n_batch=1024,
    n_ctx=32768,
)

provider = LlamaCppPythonProvider(llm)

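# spaces.GPU requests a ZeroGPU slot for up to 120 seconds per call.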
@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    chat_template = MessagesFormatterType.MISTRAL

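    # Build a fresh agent per request so the current system prompt is applied.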
    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )
    
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    messages = BasicChatHistory()

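    # Replay prior turns into the agent's chat history as user/assistant pairs.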
    for msn in history:
        user = {
            'role': Roles.user,
            'content': msn[0]
        }
        assistant = {
            'role': Roles.assistant,
            'content': msn[1]
        }
        messages.add_message(user)
        messages.add_message(assistant)
    
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )
    
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs

description = """<p><center>
<a href="https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407" target="_blank">[Instruct Model]</a>
<a href="https://huggingface.co/mistralai/Mistral-Nemo-Base-2407" target="_blank">[Base Model]</a>
<a href="https://huggingface.co/second-state/Mistral-Nemo-Instruct-2407-GGUF" target="_blank">[GGUF Version]</a>
</center></p>
"""

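# ChatInterface passes the sliders above to respond() as additional inputs;
# retry_btn/undo_btn/clear_btn and Chatbot(likeable=...) assume Gradio 4.x.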
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        ),
        gr.Slider(
            minimum=0,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
        ),
        gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition penalty",
        ),
    ],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    title="Chat with Mistral-NeMo using llama.cpp", 
    description=description,
    chatbot=gr.Chatbot(
        scale=1, 
        likeable=False,
        show_copy_button=True
    )
)

if __name__ == "__main__":
    demo.launch(debug=True)