# Default prompt for generating inference inputs

DEFAULT_SYSTEM_PROMPT = """
Below is a sentence. Identify the topic of the sentence in one word.
""".strip()

# Function to generate a prompt for a given conversation
def generate_prompt(
    conversation: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT
) -> str:
    return f"""### Instruction: {system_prompt}

### Input:
{conversation.strip()}

### Response:
""".strip


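The inference code below expects torch, tokenizer, model, and DEVICE to already be in scope. A minimal setup sketch, assuming the fine-tuned weights are loaded straight from this repo with transformers (the half-precision dtype is an assumption):

import torch
from pprint import pprint
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "Farzadaitem/llama-2-topic-modeling-Farzad"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,  # assumption: half precision to reduce memory use
).to(DEVICE)
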
# Function to find the topic of a given text using the trained model

def find_topic(model, text: str):
    # Preprocessing the input text for the model
    inputs = tokenizer(text, return_tensors="pt").to(DEVICE)
    inputs_length = len(inputs["input_ids"][0])  # number of prompt tokens

    # Performing inference with the model; do_sample=True is required for the
    # temperature to take effect, and the near-zero temperature keeps the
    # output effectively deterministic
    with torch.inference_mode():
        outputs = model.generate(
            **inputs, max_new_tokens=256, do_sample=True, temperature=0.0001
        )

    # Decoding only the newly generated tokens (the prompt tokens are sliced off)
    return tokenizer.decode(outputs[0][inputs_length:], skip_special_tokens=True)

topic = find_topic(model, generate_prompt("I am attending some classes to learn math and physics"))

# Answer
pprint(topic.split('### Response:')[0].strip())
# 'Education & Reference'
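
The same pipeline can be mapped over several inputs; a short usage sketch (the sentences are made-up examples):

for sentence in [
    "The new smartphone ships with a much faster camera sensor.",
    "Add two cups of flour and knead the dough for ten minutes.",
]:
    raw = find_topic(model, generate_prompt(sentence))
    print(sentence, "->", raw.split('### Response:')[0].strip())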

