File size: 3,589 Bytes
0b2e1a6 366a093 0b2e1a6 366a093 0b2e1a6 366a093 0b2e1a6 366a093 9a3b152 0b2e1a6 9a3b152 366a093 9a3b152 366a093 0b2e1a6 366a093 0b2e1a6 366a093 9a3b152 0b2e1a6 366a093 9a3b152 366a093 9a3b152 366a093 9a3b152 366a093 9a3b152 366a093 0b2e1a6 366a093 9a3b152 0b2e1a6 366a093 0b2e1a6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 |
---
library_name: transformers
tags:
- unsloth
---
# Model Card for DisgustingOzil/Mistral_summarizer
<!-- Provide a quick summary of what the model is/does. -->
## Requirements
```python
# Notebook setup: the leading "!" runs these as shell commands in
# Jupyter/Colab/Kaggle — this snippet is not plain Python.
!pip install gradio
!pip install -U xformers --index-url https://download.pytorch.org/whl/cu121
!pip install "unsloth[kaggle-new] @ git+https://github.com/unslothai/unsloth.git"
import os
# Disable Weights & Biases logging so runs don't prompt for a W&B login.
os.environ["WANDB_DISABLED"] = "true"
```
## Gradio App
```python
import gradio as gr
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM
import torch
import anthropic
# Load the fine-tuned Mistral summarizer as a PEFT (LoRA adapter) model in
# 4-bit quantization to reduce GPU memory use, plus its matching tokenizer.
# NOTE(review): .to("cuda") assumes a CUDA GPU is available — this fails on
# CPU-only hosts; confirm the deployment target.
load_in_4bit = True
model = AutoPeftModelForCausalLM.from_pretrained(
    "DisgustingOzil/Mistral_summarizer",
    load_in_4bit=load_in_4bit,
    torch_dtype=torch.float16,
).to("cuda")
tokenizer = AutoTokenizer.from_pretrained("DisgustingOzil/Mistral_summarizer")
def summarize_with_mistral(text):
    """Summarize *text* with the locally loaded Mistral PEFT model.

    Builds an instruction-style prompt, generates up to 150 new tokens on
    the GPU, and returns only the newly generated summary text.

    Parameters:
        text: the raw input text to summarize.
    Returns:
        The generated summary as a stripped string.
    """
    summary_prompt = f"""Below is a text that needs to be summarized. Based on the input, write a good summary which summarize all main points.
### Text:
{text}
### Summary:
"""  # The summary part is left empty for generation
    inputs = tokenizer([summary_prompt], return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_new_tokens=150, use_cache=True)
    # Decode only the tokens generated AFTER the prompt. The previous
    # implementation searched the decoded text for "### Summary:", which
    # returns the wrong index whenever the user's input itself contains
    # that marker (str.find hits the first occurrence, inside the echoed
    # input, not the prompt's own marker).
    prompt_length = inputs["input_ids"].shape[1]
    generated_tokens = outputs[0][prompt_length:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
# NOTE(review): unused leftover — nothing in this file reads summary_1
# (summarize_with_anthropic returns its result directly); safe to delete.
summary_1=""
def summarize_with_anthropic(text):
    """Summarize *text* via the Anthropic Claude-3-Haiku API.

    Reads the API key from the ANTHROPIC_API_KEY environment variable.
    SECURITY: the original version embedded a live API key directly in
    source — that key is compromised and must be revoked.

    Parameters:
        text: the raw input text to summarize.
    Returns:
        The summary text from the first content block of the response.
    Raises:
        RuntimeError: if ANTHROPIC_API_KEY is not set.
    """
    import os  # local import: the app snippet does not import os at the top

    # anthropic.Anthropic() would also fall back to ANTHROPIC_API_KEY on its
    # own; reading it explicitly makes the requirement visible and fails fast
    # with a clear message instead of a deep SDK error.
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    if not api_key:
        raise RuntimeError("Set the ANTHROPIC_API_KEY environment variable.")
    client = anthropic.Anthropic(api_key=api_key)
    message = client.messages.create(
        model="claude-3-haiku-20240307",
        max_tokens=3214,
        temperature=0,
        system="Create Good summary explaining all key points in detail, easy and understandable way",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": text,
                    }
                ],
            }
        ],
    )
    # message.content is a list of content blocks; the first holds the text.
    return message.content[0].text
def summarize_text(text, model_choice):
    """Dispatch *text* to the summarizer selected by *model_choice*.

    Recognized choices are "Mistral 7b" (local model) and "Claude-3-Haiku"
    (Anthropic API); any other value yields an error string.
    """
    if model_choice == "Mistral 7b":
        return summarize_with_mistral(text)
    if model_choice == "Claude-3-Haiku":
        return summarize_with_anthropic(text)
    return "Invalid model choice."
# Define the Gradio interface with a dropdown for model selection.
# Two inputs (free text + backend choice) map onto summarize_text's
# (text, model_choice) parameters; the single output shows the summary.
iface = gr.Interface(
    fn=summarize_text,
    inputs=[gr.Textbox(lines=10, label="Input Text"), gr.Dropdown(choices=["Mistral 7b", "Claude-3-Haiku"], label="Model Choice")],
    outputs=gr.Textbox(label="Summary"),
    title="Text Summarization",
    description="Enter text to summarize based on Maxwell's equations and related concepts. Select a model for summarization."
)
# Launch the app (debug=True surfaces tracebacks in the browser/console).
if __name__ == "__main__":
    iface.launch(debug=True)
```
|