wissamantoun committed
Commit 854b7af
1 Parent(s): 150e5b8

added ram usage

Files changed (4)
  1. app.py +4 -0
  2. backend/services.py +0 -2
  3. backend/utils.py +6 -0
  4. requirements.txt +2 -1
app.py CHANGED
@@ -4,6 +4,7 @@ import streamlit as st
 import backend.aragpt
 import backend.home
 import backend.processor
+from backend.utils import get_current_ram_usage
 
 st.set_page_config(
     page_title="TEST", page_icon="📖", initial_sidebar_state="expanded", layout="wide"
@@ -34,3 +35,6 @@ st.sidebar.write(
 st.sidebar.write(
     "App source code available on [GitHub](https://github.com/WissamAntoun/Arabic-NLP-app)"
 )
+if st.sidebar.checkbox("Show RAM usage"):
+    ram = get_current_ram_usage()
+    st.sidebar.write("Ram usage: {:.2f}/{:.2f} GB".format(ram[0], ram[1]))
backend/services.py CHANGED
@@ -1,12 +1,10 @@
 import json
 import os
-
 import requests
 from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline, set_seed
 from .modeling_gpt2 import GPT2LMHeadModel as GROVERLMHeadModel
 from .preprocess import ArabertPreprocessor
 
-
 # Taken and Modified from https://huggingface.co/spaces/flax-community/chef-transformer/blob/main/app.py
 class TextGeneration:
     def __init__(self):
backend/utils.py ADDED
@@ -0,0 +1,6 @@
+import psutil
+
+
+def get_current_ram_usage():
+    ram = psutil.virtual_memory()
+    return ram.available / 1024 / 1024 / 1024, ram.total / 1024 / 1024 / 1024
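psutil.virtual_memory() returns a named tuple whose available and total fields are reported in bytes, so the three divisions by 1024 convert each value to GiB. A quick check of the new helper (a sketch, assuming psutil==5.8.0 from requirements.txt is installed):

from backend.utils import get_current_ram_usage

# Returns (available_gb, total_gb) as floats.
available_gb, total_gb = get_current_ram_usage()
print("{:.2f} GB free of {:.2f} GB".format(available_gb, total_gb))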
requirements.txt CHANGED
@@ -5,5 +5,6 @@ PyArabic
 farasapy==0.0.14
 emoji==1.4.2
 awesome_streamlit
-torch
+torch==1.9.0
 transformers==4.10.0
+psutil==5.8.0