---
library_name: transformers
tags:
  - unsloth
---

# Model Card for DisgustingOzil/Mistral_summarizer

A Mistral-based summarization model fine-tuned with [Unsloth](https://github.com/unslothai/unsloth) and published as a PEFT adapter. The sections below cover the required packages and a small Gradio demo that wraps the model.

## Requirements

```python
# Notebook-style installs (Colab/Kaggle); drop the leading "!" in a shell.
!pip install gradio
!pip install -U xformers --index-url https://download.pytorch.org/whl/cu121
!pip install "unsloth[kaggle-new] @ git+https://github.com/unslothai/unsloth.git"

import os

# Disable Weights & Biases logging; it is not needed for inference.
os.environ["WANDB_DISABLED"] = "true"
```
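
The demo below loads the model in 4-bit on a CUDA device, so it is worth confirming that a GPU is visible first. A minimal sanity check using standard PyTorch calls:

```python
import torch

# The 4-bit load path in the demo requires an NVIDIA GPU.
assert torch.cuda.is_available(), "No CUDA device found; this demo expects a GPU."
print(f"Using GPU: {torch.cuda.get_device_name(0)}")
```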

## Gradio App


```python
import gradio as gr
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the model and tokenizer once at startup instead of on every request.
# Note: bitsandbytes 4-bit models do not support .to("cuda"); device placement
# is handled via device_map instead.
model = AutoPeftModelForCausalLM.from_pretrained(
    "DisgustingOzil/Mistral_summarizer",
    load_in_4bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("DisgustingOzil/Mistral_summarizer")

def summarize_text(text):
    # Prompt template, kept verbatim since the model was presumably fine-tuned
    # on this exact format; the summary section is left empty for generation.
    summary_prompt = f"""Below is a text that needs to be summarized. Based on the input, write a good summary which summarize all main points.

### Text:
{text}

### Summary:
"""

    inputs = tokenizer([summary_prompt], return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=200, use_cache=True)
    decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

    # Keep only the text generated after the "### Summary:" marker.
    marker = "### Summary:"
    start = decoded.find(marker)
    return decoded[start + len(marker):].strip() if start != -1 else decoded.strip()

# Define the Gradio interface
iface = gr.Interface(
    fn=summarize_text,
    inputs=gr.Textbox(lines=10, label="Input Text"),
    outputs=gr.Textbox(label="Summary"),
    title="Text Summarization",
    description="Enter text to summarize based on Maxwell's equations and related concepts.",
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
```
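
Since `summarize_text` is a plain Python function, it can also be smoke-tested without the UI; the sample text below is just an illustrative placeholder:

```python
# Quick check outside Gradio; any paragraph of text works here.
sample = (
    "Gauss's law relates the electric flux through a closed surface to the "
    "enclosed charge. Together with Faraday's and Ampère's laws, it forms "
    "part of Maxwell's equations describing classical electromagnetism."
)
print(summarize_text(sample))
```

On Kaggle or Colab, `iface.launch(share=True)` additionally prints a temporary public URL for the demo.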