Kurkur99 committed on
Commit
9583ad8
•
1 Parent(s): 29dd1d7

Upload 3 files

app.py ADDED
@@ -0,0 +1,67 @@
+ import gradio as gr
+ import torch
+ from transformers import BertForSequenceClassification, BertTokenizer
+
+ # Load the tokenizer from Hugging Face
+ token_model = "indolem/indobertweet-base-uncased"
+ tokenizer = BertTokenizer.from_pretrained(token_model)
+
+ # Directory that holds the fine-tuned model's config.json and pytorch_model.bin
+ model_directory = "model_directory"  # Make sure this directory contains config.json and pytorch_model.bin
+
+ # Load the model
+ # If the weights file is named differently, rename it to pytorch_model.bin or adjust the loading step
+ model = BertForSequenceClassification.from_pretrained(model_directory)
+ model.eval()  # Set the model to evaluation mode
+
+ # Use CUDA if available, otherwise fall back to CPU
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device)
+
+ def classify_transaction(notes):
+     # Tokenize the input text
+     inputs = tokenizer.encode_plus(
+         notes,
+         None,
+         add_special_tokens=True,
+         max_length=256,
+         padding='max_length',
+         return_token_type_ids=False,
+         return_attention_mask=True,
+         truncation=True,
+         return_tensors='pt'
+     )
+
+     # Move tensors to the same device as the model
+     input_ids = inputs['input_ids'].to(device)
+     attention_mask = inputs['attention_mask'].to(device)
+
+     # The model is already in evaluation mode; calling eval() again is a harmless safeguard
+     model.eval()
+
+     # Make the prediction without tracking gradients
+     with torch.no_grad():
+         outputs = model(input_ids, attention_mask=attention_mask)
+
+     # Extract logits and convert them to probabilities
+     logits = outputs.logits
+     probabilities = torch.softmax(logits, dim=1)
+
+     # Get the index of the predicted class
+     predicted_class = torch.argmax(probabilities, dim=1).item()
+
+     # Return the predicted class
+     return f"Predicted Category: {predicted_class}"
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=classify_transaction,
+     inputs=gr.Textbox(lines=3, placeholder="Enter Transaction Notes Here", label="Transaction Notes"),
+     outputs=gr.Text(label="Classification Result"),
+     title="Transaction Category Classifier",
+     description="Enter transaction notes to get the predicted category.",
+     live=True  # Update the output as soon as the input changes
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
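
Note that classify_transaction returns only the numeric class index. If the fine-tuned config was saved with an id2label mapping (an assumption; the uploaded files do not show one), the index could be turned into a readable name with a small helper along these lines, where the fallback string is a hypothetical placeholder:

# Sketch only: assumes model.config.id2label was populated during fine-tuning,
# which is not confirmed by the files in this commit.
def index_to_label(index: int) -> str:
    return model.config.id2label.get(index, f"class_{index}")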
modeling/finetuned_BERT_epoch_1.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:620d5d7ce69f6dfa7490dffd300e09853b73936b4a21286736660bbb2cf733a9
+ size 442385251
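
app.py loads the model with from_pretrained(model_directory), which expects config.json and pytorch_model.bin, while this upload is a single ~442 MB checkpoint file. If the checkpoint is a plain state dict saved with torch.save (an assumption, not verified here), it could be converted into that directory layout roughly as sketched below; num_labels=5 is a placeholder and must match the fine-tuning setup:

# Sketch, assuming finetuned_BERT_epoch_1.model is a state dict and that the
# label count is known; num_labels=5 is a hypothetical placeholder.
import torch
from transformers import BertForSequenceClassification

base = BertForSequenceClassification.from_pretrained(
    "indolem/indobertweet-base-uncased", num_labels=5
)
state_dict = torch.load("modeling/finetuned_BERT_epoch_1.model", map_location="cpu")
base.load_state_dict(state_dict)
base.save_pretrained("model_directory")  # writes config.json and pytorch_model.bin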
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ Flask==2.1.2
+ gradio==4.15.0
+ requests==2.27.1
+ transformers==4.20.1
+ torch==2.1.2
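
For a local run, the pinned dependencies can be installed in the usual way (Flask is pinned but not imported by app.py):

pip install -r requirements.txt
python app.py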