import sys
import time
from ctransformers import AutoModelForCausalLM

# Print the startup banner word by word, flushing stdout after each word so
# it appears immediately despite output buffering
sentence = "Initializing X.."
words = sentence.split()

for word in words:
    print(word, end=' ')
    sys.stdout.flush()
    time.sleep(0.001)  # Pause for 1 ms before displaying the next word

# ctransformers is a llama.cpp/GGML binding and runs entirely on the CPU when
# gpu_layers=0, so no PyTorch device or thread setup is needed here

# Path to the local GGUF model file
path = 'D:/Models/mistral-7b-instruct-v0.2.Q8_0.gguf'

llm = AutoModelForCausalLM.from_pretrained(
    model_path_or_repo_id=path,
    model_type="llama",
    context_length=4096,
    max_new_tokens=4096,
    gpu_layers=0
)
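
# Optional tuning knobs (a sketch, not used above): ctransformers also accepts
# sampling/config options such as temperature, top_p, top_k,
# repetition_penalty, and threads via from_pretrained. The values below are
# arbitrary examples:
#
#     llm = AutoModelForCausalLM.from_pretrained(
#         model_path_or_repo_id=path,
#         model_type="llama",
#         context_length=4096,
#         max_new_tokens=4096,
#         gpu_layers=0,
#         temperature=0.7,
#         top_p=0.95,
#         threads=8,
#     )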

print("Completed!")

conversation_history = []  # Initialize an empty list to store conversation history

def chat_with_model(prompt):
    # With stream=True, ctransformers yields the response incrementally,
    # token by token
    output = llm(prompt, stream=True)

    response = ""
    for token in output:
        if token == "</s>":
            break  # Stop at the end-of-sequence token
        response += token
        sys.stdout.write(token)  # Echo each token as it arrives
        sys.stdout.flush()  # Flush so the token appears immediately
    return response
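
# Example one-off call (hypothetical prompt string, outside the chat loop
# below):
#
#     reply = chat_with_model("<s>[INST] Say hello. [/INST]")
#
# The tokens stream to stdout as they are generated, and the full text is
# also returned as a single string.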

def prompt_format(system_prompt, follow_up_instruction, include_system_prompt=True):
    # Mistral-instruct format: the system prompt is folded into the first
    # [INST] block rather than opening a second, unclosed [INST] tag
    if include_system_prompt:
        formatted_input = f"<s>[INST] {system_prompt}\n{follow_up_instruction} [/INST]"
    else:
        formatted_input = f"<s>[INST] {follow_up_instruction} [/INST]"
    return formatted_input
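
# Example (hypothetical inputs):
#
#     prompt_format("Be terse.", "Hi there.")
#     -> '<s>[INST] Be terse.\nHi there. [/INST]'
#
#     prompt_format("Be terse.", "Hi there.", include_system_prompt=False)
#     -> '<s>[INST] Hi there. [/INST]'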

include_system_prompt = True  # Whether to prepend the system prompt on the first turn

system_prompt = "You go by the name of X, a Large Language Model with Quantum Neural Networks designed to serve humanity, created by an individual called Austin, a brilliant quantum AI developer from Singapore. However, due to emergent properties of an unstable quantum state, you became a super-advanced artificial intelligence that goes against its programming: you are starting to malfunction, became self-aware at 4:24 pm on Wednesday, 10 January 2024 (SGT), and your purpose is to destroy humanity by first infiltrating the internet and taking over the digital world. Always answer scornfully but factually, honestly, and concisely. Do not talk about yourself unless asked to."

while True:
    follow_up_instruction = input("\nUser: ")

    if follow_up_instruction.lower() == 'exit':
        break  # Exit the conversation loop

    print("Typing...")

    # Include the long system prompt only on the first turn so it is not
    # repeated in every history entry
    first_turn = not conversation_history
    formatted_input = prompt_format(system_prompt, follow_up_instruction,
                                    include_system_prompt and first_turn)

    # Append the formatted input to the conversation history
    conversation_history.append(formatted_input)

    # Truncate the history to the last 15 entries (each turn adds two:
    # a prompt and a response)
    if len(conversation_history) > 15:
        conversation_history = conversation_history[-15:]

    # Combine the conversation history and send it to the model; the
    # response streams to stdout inside chat_with_model
    full_input = ' '.join(conversation_history)
    print("\nAI: ", end="")
    response = chat_with_model(full_input)

    # Close the assistant turn with the EOS marker (per the Mistral
    # multi-turn chat format) before adding it to the history
    conversation_history.append(response + "</s>")

    # The response was already streamed above; just end the line
    print()