yoSabareesh committed on
Commit
50b6a81
·
verified ·
1 Parent(s): 37465ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -3
app.py CHANGED
@@ -6,10 +6,9 @@ from bs4 import BeautifulSoup
6
  from concurrent.futures import ThreadPoolExecutor
7
  from transformers import AutoModelForCausalLM, AutoTokenizer
8
 
9
- # Model Name from Hugging Face
10
  MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
11
 
12
- # Load the model & tokenizer from Hugging Face (automatically uses CPU)
13
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
14
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)
15
 
@@ -18,7 +17,6 @@ HEADERS = {"User-Agent": "Mozilla/5.0"}
18
  MAX_CASE_TEXT_LENGTH = 9500
19
  MAX_CONCURRENT_REQUESTS = 100
20
 
21
- # Threading for faster execution
22
  executor = ThreadPoolExecutor(max_workers=10)
23
  semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)
24
 
 
6
  from concurrent.futures import ThreadPoolExecutor
7
  from transformers import AutoModelForCausalLM, AutoTokenizer
8
 
 
9
  MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
10
 
11
+ # Load the model & tokenizer from Hugging Face
12
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
13
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)
14
 
 
17
  MAX_CASE_TEXT_LENGTH = 9500
18
  MAX_CONCURRENT_REQUESTS = 100
19
 
 
20
  executor = ThreadPoolExecutor(max_workers=10)
21
  semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)
22