File size: 3,221 Bytes
9ea509f
 
 
 
ed6e1cd
 
511c265
ed6e1cd
 
9cf8880
ed6e1cd
 
9cf8880
 
7ee0732
9cf8880
 
 
 
 
7ee0732
9cf8880
 
 
7acd6f4
 
7ee0732
9cf8880
 
ed6e1cd
9cf8880
 
 
7ee0732
9cf8880
 
ed6e1cd
 
9cf8880
8eed272
 
 
 
093c540
ed6e1cd
 
8eed272
ed6e1cd
9cf8880
 
7ee0732
4905e93
ed6e1cd
4905e93
5d4e526
7acd6f4
9ea509f
 
 
511c265
71de5b2
 
511c265
9ea509f
a664b76
511c265
71de5b2
 
d7a52b1
c0d2fc9
9531638
 
a664b76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b4ed54
96f718e
c0d2fc9
7acd6f4
d57f416
7acd6f4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
"""Streamlit demo: playing-card image classification.

Loads the ``rwcuffney/pick_a_card_test`` dataset and the AutoTrain
checkpoint ``rwcuffney/autotrain-pick_a_card-3726099224``, shows the
model's class name, displays one test image, and preprocesses that image
into model-ready tensors.

NOTE(review): the original file "commented out" old experiments with bare
triple-quoted strings. Streamlit's magic renders any bare module-level
string into the app, so all of that dead code was being dumped into the
UI. It is converted to real ``#`` comments below.
"""
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt

from datasets import load_dataset
from transformers import (
    AutoFeatureExtractor,
    AutoImageProcessor,
    AutoModelForImageClassification,
)
import torch

dataset = load_dataset("rwcuffney/pick_a_card_test")

# --- Disabled experiments (kept for reference) -------------------------------
# Earlier runs loaded the sibling AutoTrain checkpoints ...221, ...222, ...223
# and ...225 with the same four lines each:
#   extractor = AutoFeatureExtractor.from_pretrained("rwcuffney/autotrain-pick_a_card-<id>")
#   model = AutoModelForImageClassification.from_pretrained("rwcuffney/autotrain-pick_a_card-<id>")
#   st.write(model.__class__.__name__)
#   st.code(type(model))
#
# A text-model experiment was also abandoned:
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   tokenized_data = tokenizer(dataset["sentence"], return_tensors="np", padding=True)
#
# And a CSV-based exploration of the raw dataset:
#   url = "https://huggingface.co/datasets/rwcuffney/autotrain-data-pick_a_card/raw/main/cards.csv"
#   download = requests.get(url).content
#   df = pd.read_csv(io.StringIO(download.decode('utf-8')))
#   df_test = df[df['data set'] == 'test']
#   df_train = df[df['data set'] == 'train']
#   df_validate = df[df['data set'] == 'validate']
#   st.write(df.head(20))
# -----------------------------------------------------------------------------

# Live checkpoint: ...224 only.
extractor = AutoFeatureExtractor.from_pretrained("rwcuffney/autotrain-pick_a_card-3726099224")
model = AutoModelForImageClassification.from_pretrained("rwcuffney/autotrain-pick_a_card-3726099224")
st.write(model.__class__.__name__)
st.code(type(model))

# A dataset row is a dict of columns; st.image needs the image object itself,
# not the whole row (the original passed the row, which st.image cannot render).
example = dataset["test"][0]
image = example["image"]  # assumes the image column is named "image" — TODO confirm against dataset schema
st.image(image)

image_processor = AutoImageProcessor.from_pretrained("rwcuffney/autotrain-pick_a_card-3726099224")
# NOTE(review): `inputs` is prepared but never fed to `model` yet — next step
# would be `model(**inputs)` under `torch.no_grad()`.
inputs = image_processor(image, return_tensors="pt")