# Model Card for BART Text Summarization

# Model Name
model_name = "BART Text Summarization"

# Description
description = "BART (Bidirectional and Auto-Regressive Transformers) is a sequence-to-sequence model designed for various NLP tasks, including text summarization. This model uses the 'facebook/bart-base' pre-trained checkpoint."

# Language
language = "English"

# Intended Use
intended_use = "The BART Text Summarization model is intended to generate abstractive summaries of input texts. It can be used in applications that require condensing lengthy texts into concise summaries."

# Limitations
limitations = "The model's performance may vary with the complexity and length of the input text, and it may not accurately summarize highly technical or domain-specific content. In addition, the model was fine-tuned on a relatively small dataset, which can lead to unsatisfactory summaries."

# Evaluation Metrics
evaluation_metrics = "The model can be evaluated with metrics such as ROUGE (Recall-Oriented Understudy for Gisting Evaluation), BLEU (Bilingual Evaluation Understudy), and METEOR (Metric for Evaluation of Translation with Explicit ORdering). These metrics measure the quality and similarity of the generated summaries relative to human-written references."

# Version
version = "1.0"

# Deployment Instructions
# Setup for the snippet below: the checkpoint name follows the Description above;
# the length limits are representative defaults (1024 is BART's encoder limit,
# 128 is an assumed upper bound on summary length).
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-base").to(device)
max_input_length = 1024
max_target_length = 128

# Read the text to summarize, tokenize it, and move the tensors to the device.
user_text = input("Enter the text you want to summarize: ")
tokenized_user_text = tokenizer([user_text], truncation=True, padding='longest', max_length=max_input_length, return_tensors='pt')
user_input_ids = tokenized_user_text['input_ids'].to(device)
user_attention_mask = tokenized_user_text['attention_mask'].to(device)

# Generate a summary and decode it back to text, dropping special tokens.
outputs = model.generate(input_ids=user_input_ids, attention_mask=user_attention_mask, max_length=max_target_length)
decoded_summaries = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
for summary in decoded_summaries:
    print("Generated Summary:")
    print(summary)
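
# Generation Options (illustrative)
# The generate() call above uses greedy decoding by default. A minimal sketch of
# a beam-search variant, which often improves summary quality for BART models;
# the parameter values below are illustrative assumptions, not tuned settings.
outputs = model.generate(
    input_ids=user_input_ids,
    attention_mask=user_attention_mask,
    max_length=max_target_length,
    num_beams=4,             # beam search instead of greedy decoding
    length_penalty=2.0,      # favor somewhat longer summaries
    no_repeat_ngram_size=3,  # avoid repeating any 3-gram in the output
    early_stopping=True,     # stop once all beams have finished
)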
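
# Evaluation Example
# A minimal sketch of how the ROUGE metric named in the Evaluation Metrics
# section could be computed, assuming the Hugging Face `evaluate` package is
# installed; the prediction and reference strings are hypothetical placeholders.
import evaluate

rouge = evaluate.load("rouge")
example_predictions = ["The model produced this summary."]   # hypothetical model output
example_references = ["A human wrote this reference summary."]  # hypothetical reference
rouge_scores = rouge.compute(predictions=example_predictions, references=example_references)
print(rouge_scores)  # dict of ROUGE scores, e.g. rouge1, rouge2, rougeL, rougeLsum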