Peter Vandenabeele committed
Commit 34d3c87
1 Parent(s): 428da67

Try PYTORCH_CUDA_ALLOC_CONF = "max_split_size_mb:512"
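PYTORCH_CUDA_ALLOC_CONF configures PyTorch's CUDA caching allocator; max_split_size_mb:512 keeps the allocator from splitting cached blocks larger than 512 MB, which can reduce fragmentation and "CUDA out of memory" errors when loading large models. The variable is read when the allocator first initializes, so the assignment has to run before the first tensor is placed on the GPU. A minimal standalone sketch of the idea (not code from this repo):

import os

# Must be set before torch's CUDA caching allocator is initialized,
# i.e. before the first allocation on the GPU.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

import torch

if torch.cuda.is_available():
    x = torch.zeros(1024, 1024, device="cuda")  # first CUDA allocation
    print(torch.cuda.memory_allocated())        # bytes allocated under the new allocator config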

Files changed (2)
  1. app.py +3 -0
  2. scrape_website.py +2 -0
app.py CHANGED
@@ -3,6 +3,9 @@ from peft import PeftModel
 import transformers
 import gradio as gr
 from scrape_website import process_webpages
+import os
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
 
 assert (
     "LlamaTokenizer" in transformers._import_structure["models.llama"]
scrape_website.py CHANGED
@@ -51,6 +51,8 @@ if __name__ == "__main__":
         urls=[
             "https://www.example.org",
             "https://www.example.com",
+            "https://www.imperial.ac.uk/stories/climate-action/",
+            "https://support.worldwildlife.org/site/SPageNavigator/ActionsToFightClimateChange.html",
         ]
     )
 )