import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "gkrishnan/Resume_Parsing_Model"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model and tokenizer that the adapter was trained on.
base_model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=False,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights to the base model.
model = PeftModel.from_pretrained(base_model, peft_model_id)
model.eval()


def make_inference(resume):
    # Build the instruction prompt and move the inputs to the model's device
    # (required when the model was loaded with device_map="auto").
    batch = tokenizer(
        f"Write a summary based off this resume.\n\n### Resume:\n{resume}",
        return_tensors="pt",
    ).to(model.device)
    with torch.no_grad(), torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=200)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)


if __name__ == "__main__":
    import gradio as gr

    # gr.inputs / gr.outputs were removed in Gradio 3+; pass components directly.
    gr.Interface(
        fn=make_inference,
        inputs=gr.Textbox(lines=2, label="Resume"),
        outputs=gr.Textbox(label="Summarized Resume"),
        title="Resume Summary Generator",
        description="Generates a summary from a resume.",
    ).launch()
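
# A minimal sketch of calling the model without launching the Gradio UI,
# assuming the adapter weights on the Hub are reachable and enough GPU/CPU
# memory is available; the sample resume text is purely illustrative.
#
#     sample = "Jane Doe. Data engineer, 5 years building ETL pipelines in Python and SQL."
#     print(make_inference(sample))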