---
license: other
license_name: deepseek-coder-33b
license_link: https://huggingface.co/deepseek-ai/deepseek-coder-33b-base/blob/main/LICENSE
---

# Trinity

Trinity is a general-purpose coding AI.
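
Trinity expects a plain-text `SYSTEM:` / `USER:` / `ASSISTANT:` prompt layout, the same one used in the sample inference code below. A minimal single-turn prompt looks like this; the user message here is only an illustrative placeholder:

```python
# Single-turn prompt in the SYSTEM/USER/ASSISTANT format used in the
# sample code below. The user message is a hypothetical example.
system = "You are an AI that can code. Answer with code."
user = "Write a Python function that reverses a string."
prompt = f"SYSTEM: {system} \nUSER: {user} \nASSISTANT: "
```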

Our offensive cybersecurity model, WhiteRabbitNeo-33B-v1.2, is now in beta!

Check out the Prompt Enhancing feature! Access it at: https://www.whiterabbitneo.com/

## Join Our Discord Server

Join us at: https://discord.gg/8Ynkrcbk92 (updated Dec 29th; this is now a permanent invite link)

## Sample Inference Code

```python
import json  # used by the optional JSONL logging at the bottom
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "/home/migel/models/Trinity"

# Load the model in 8-bit precision (requires the bitsandbytes package)
# and let accelerate spread layers across the available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    load_in_4bit=False,
    load_in_8bit=True,
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def generate_text(instruction):
    # Tokenize the prompt and move it to the GPU.
    tokens = tokenizer.encode(instruction, return_tensors="pt").to("cuda")

    # Sampling settings for generation.
    instance = {
        "top_p": 1.0,
        "temperature": 0.5,
        "generate_len": 1024,
        "top_k": 50,
    }

    length = tokens.shape[1]
    with torch.no_grad():
        output_ids = model.generate(
            input_ids=tokens,
            max_length=length + instance["generate_len"],
            use_cache=True,
            do_sample=True,
            top_p=instance["top_p"],
            temperature=instance["temperature"],
            top_k=instance["top_k"],
            num_return_sequences=1,
        )
    # Keep only the newly generated tokens, not the prompt.
    output = output_ids[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    # Trim the reply if the model starts generating the next "USER:" turn.
    answer = string.split("USER:")[0].strip()
    return answer


conversation = "SYSTEM: You are an AI that can code. Answer with code."

while True:
    user_input = input("You: ")
    llm_prompt = f"{conversation} \nUSER: {user_input} \nASSISTANT: "
    answer = generate_text(llm_prompt)
    print(answer)
    # Carry the full history forward so the model sees prior turns.
    conversation = f"{llm_prompt}{answer}"
    json_data = {"prompt": user_input, "answer": answer}

    # Optional: append each turn to a JSONL log (path left undefined here).
    # with open(output_file_path, "a") as output_file:
    #     output_file.write(json.dumps(json_data) + "\n")
```
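
Note that passing `load_in_8bit` directly to `from_pretrained`, as above, is deprecated in recent `transformers` releases. If you are on a newer version, the same 8-bit loading can be expressed with a `BitsAndBytesConfig`; a minimal sketch, assuming a recent `transformers` and `bitsandbytes` installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_path = "/home/migel/models/Trinity"  # same local path as the sample above

# 8-bit quantization via BitsAndBytesConfig, the current equivalent of the
# load_in_8bit=True argument used in the sample code.
quant_config = BitsAndBytesConfig(load_in_8bit=True)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quant_config,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
```

The rest of the sample (the `generate_text` helper and the chat loop) works unchanged with a model loaded this way.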