from transformers import AutoImageProcessor, AutoBackbone
import torch
from PIL import Image
import requests

# Load a sample image from the COCO validation set.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Use Swin-Tiny as a backbone and return the feature map from stage 1 only.
processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
feature_maps = outputs.feature_maps

# Streamlit demo (kept commented out). After uncommenting, save the file and
# launch it with: streamlit run <this_file>.py
# import streamlit as st
# from transformers import pipeline
#
# classifier = pipeline(task="sentiment-analysis")
#
# text = st.text_input("Label", "enter some text!")
# if text:
#     out = classifier(text)
#     st.json(out)
#
# uploaded_file = st.file_uploader("Choose an image file", type=["png", "jpg", "jpeg"])
# if uploaded_file is not None:
#     bytes_data = uploaded_file.read()
#     st.write("filename:", uploaded_file.name)
#     st.write(bytes_data)
#     st.image(bytes_data)
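
# --- Sanity check for the backbone output above (sketch) --------------------
# A minimal, hedged check of the feature maps returned by AutoBackbone. The
# expected shape is an assumption: for Swin-Tiny with a 224x224 input and
# out_indices=(1,), stage 1 should yield a single map of roughly
# (batch, 96, 56, 56). Adjust if your checkpoint or input size differs.
for i, fmap in enumerate(feature_maps):
    print(f"feature map {i}: shape={tuple(fmap.shape)}, dtype={fmap.dtype}")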