crimeacs committed
Commit 50edc36
1 Parent(s): 5af4ad6

added emoji
Files changed (4):
  1. .DS_Store +0 -0
  2. Gradio_app.ipynb +96 -36
  3. app.py +32 -32
  4. weights.ckpt → model.pt +2 -2
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
Gradio_app.ipynb CHANGED
@@ -2,14 +2,29 @@
   "cells": [
    {
     "cell_type": "code",
-    "execution_count": 1,
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model = Onset_picker.load_from_checkpoint(\"./weights.ckpt\",\n",
+     "                                          picker=Updated_onset_picker(),\n",
+     "                                          learning_rate=3e-4)\n",
+     "model.eval()\n",
+     "model.freeze()\n",
+     "script = model.to_torchscript()\n",
+     "torch.jit.save(script, \"model.pt\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 25,
     "metadata": {},
     "outputs": [
      {
       "name": "stdout",
       "output_type": "stream",
       "text": [
-       "Running on local URL: http://127.0.0.1:7860\n",
+       "Running on local URL: http://127.0.0.1:7871\n",
        "\n",
        "To create a public link, set `share=True` in `launch()`.\n"
       ]
@@ -17,7 +32,7 @@
      {
       "data": {
        "text/html": [
-        "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+        "<div><iframe src=\"http://127.0.0.1:7871/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
       ],
       "text/plain": [
        "<IPython.core.display.HTML object>"
@@ -30,7 +45,7 @@
      "data": {
       "text/plain": []
      },
-     "execution_count": 1,
+     "execution_count": 25,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -41,7 +56,6 @@
    "import gradio as gr\n",
    "import numpy as np\n",
    "import pandas as pd\n",
-   "from phasehunter.model import Onset_picker, Updated_onset_picker\n",
    "from phasehunter.data_preparation import prepare_waveform\n",
    "import torch\n",
    "\n",
@@ -69,7 +83,7 @@
    "    processed_input = prepare_waveform(waveform)\n",
    "    \n",
    "    # Make prediction\n",
-   "    with torch.no_grad():\n",
+   "    with torch.inference_mode():\n",
    "        output = model(processed_input)\n",
    "\n",
    "    p_phase = output[:, 0]\n",
@@ -377,39 +391,40 @@
    "\n",
    "    return image, output_picks\n",
    "\n",
-   "\n",
-   "model = Onset_picker.load_from_checkpoint(\"./weights.ckpt\",\n",
-   "                                          picker=Updated_onset_picker(),\n",
-   "                                          learning_rate=3e-4)\n",
-   "model.eval()\n",
+   "model = torch.jit.load(\"model.pt\")\n",
    "\n",
    "with gr.Blocks() as demo:\n",
-   "    gr.HTML(\"\"\"<h1>PhaseHunter</h1>\n",
-   "<p>This app allows uses a new neural network called PhaseHunter to detect <span style=\"background-image: linear-gradient(to right, #ED213A, #93291E); \n",
-   "    -webkit-background-clip: text;\n",
-   "    -webkit-text-fill-color: transparent;\n",
-   "    background-clip: text;\n",
-   "    font-size: 24px;\">P</span> and \n",
-   "    <span style=\"background-image: linear-gradient(to right, #00B4DB, #0083B0); \n",
-   "    -webkit-background-clip: text;\n",
-   "    -webkit-text-fill-color: transparent;\n",
-   "    background-clip: text;\n",
-   "    font-size: 24px;\">S</span> seismic phases along with <span style=\"background-image: linear-gradient(to right, #f12711, #f5af19); \n",
-   "    -webkit-background-clip: text;\n",
-   "    -webkit-text-fill-color: transparent;\n",
-   "    background-clip: text;\n",
-   "    font-size: 24px;\">\n",
-   "    uncertainty\n",
-   "    </span> of the detection.</p>\n",
-   "<ol>\n",
-   "    <li>By selecting one of the sample waveforms.</li>\n",
-   "    <li>By uploading your own waveform.</li>\n",
-   "    <li>By selecting an earthquake from the global earthquake catalogue.</li>\n",
-   "</ol>\n",
-   "<p>Please upload your waveform in <code>.npy</code> (numpy) format.</p>\n",
-   "<p>Your waveform should be sampled at 100 samples per second and have 3 (Z, N, E) or 1 (Z) channels. \n",
-   "If your file is longer than 60 seconds, the app will only use the first 60 seconds of the waveform.</p>\n",
+   "    gr.HTML(\"\"\"\n",
+   "<div style=\"padding: 20px; border-radius: 10px;\">\n",
+   "    <h1 style=\"font-size: 30px; text-align: center; margin-bottom: 20px;\">PhaseHunter <span style=\"animation: arrow-anim 10s linear infinite; display: inline-block; transform: rotate(45deg) translateX(-20px);\">🏹</span>\n",
+   "\n",
+   "<style>\n",
+   "    @keyframes arrow-anim {\n",
+   "        0% { transform: translateX(-20px); }\n",
+   "        50% { transform: translateX(20px); }\n",
+   "        100% { transform: translateX(-20px); }\n",
+   "    }\n",
+   "</style></h1>\n",
+   "\n",
+   "    <p style=\"font-size: 16px; margin-bottom: 20px;\">Detect <span style=\"background-image: linear-gradient(to right, #ED213A, #93291E); \n",
+   "    -webkit-background-clip: text;\n",
+   "    -webkit-text-fill-color: transparent;\n",
+   "    background-clip: text;\">P</span> and <span style=\"background-image: linear-gradient(to right, #00B4DB, #0083B0); \n",
+   "    -webkit-background-clip: text;\n",
+   "    -webkit-text-fill-color: transparent;\n",
+   "    background-clip: text;\">S</span> seismic phases with <span style=\"background-image: linear-gradient(to right, #f12711, #f5af19); \n",
+   "    -webkit-background-clip: text;\n",
+   "    -webkit-text-fill-color: transparent;\n",
+   "    background-clip: text;\">uncertainty</span></p>\n",
+   "    <ul style=\"font-size: 16px; margin-bottom: 40px;\">\n",
+   "        <li>Detect seismic phases by selecting a sample waveform or uploading your own waveform in <code>.npy</code> format.</li>\n",
+   "        <li>Select an earthquake from the global earthquake catalogue and PhaseHunter will analyze seismic stations in the given radius.</li>\n",
+   "        <li>Waveforms should be sampled at 100 samples/sec and have 3 (Z, N, E) or 1 (Z) channels. PhaseHunter analyzes the first 6000 samples of your file.</li>\n",
+   "    </ul>\n",
+   "</div>\n",
    "\"\"\")\n",
+   "\n",
    "    with gr.Tab(\"Try on a single station\"):\n",
    "        with gr.Row(): \n",
    "            # Define the input and output types for Gradio\n",
@@ -510,6 +525,51 @@
    "demo.launch()"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": 24,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "text/html": [
+      "\n",
+      "<span style=\"animation: arrow-anim 10s linear infinite; display: inline-block; transform: rotate(45deg) translateX(-20px);\">🏹</span>\n",
+      "\n",
+      "<style>\n",
+      "    @keyframes arrow-anim {\n",
+      "        0% { transform: translateX(-20px); }\n",
+      "        50% { transform: translateX(20px); }\n",
+      "        100% { transform: translateX(-20px); }\n",
+      "    }\n",
+      "</style>\n",
+      "\n",
+      "\n"
+     ],
+     "text/plain": [
+      "<IPython.core.display.HTML object>"
+     ]
+    },
+    "metadata": {},
+    "output_type": "display_data"
+   }
+  ],
+  "source": [
+   "%%html\n",
+   "\n",
+   "<span style=\"animation: arrow-anim 10s linear infinite; display: inline-block; transform: rotate(45deg) translateX(-20px);\">🏹</span>\n",
+   "\n",
+   "<style>\n",
+   "    @keyframes arrow-anim {\n",
+   "        0% { transform: translateX(-20px); }\n",
+   "        50% { transform: translateX(20px); }\n",
+   "        100% { transform: translateX(-20px); }\n",
+   "    }\n",
+   "</style>\n",
+   "\n",
+   "\n"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": null,
app.py CHANGED
@@ -3,7 +3,6 @@
 import gradio as gr
 import numpy as np
 import pandas as pd
- from phasehunter.model import Onset_picker, Updated_onset_picker
 from phasehunter.data_preparation import prepare_waveform
 import torch
 
@@ -31,7 +30,7 @@ def make_prediction(waveform):
     processed_input = prepare_waveform(waveform)
 
     # Make prediction
-     with torch.no_grad():
+     with torch.inference_mode():
         output = model(processed_input)
 
     p_phase = output[:, 0]
@@ -339,39 +338,40 @@ def predict_on_section(client_name, timestamp, eq_lat, eq_lon, radius_km, source
 
     return image, output_picks
 
-
- model = Onset_picker.load_from_checkpoint("./weights.ckpt",
-                                           picker=Updated_onset_picker(),
-                                           learning_rate=3e-4)
- model.eval()
+ model = torch.jit.load("model.pt")
 
 with gr.Blocks() as demo:
-     gr.HTML("""<h1>PhaseHunter</h1>
-     <p>This app allows uses a new neural network called PhaseHunter to detect <span style="background-image: linear-gradient(to right, #ED213A, #93291E); 
-     -webkit-background-clip: text;
-     -webkit-text-fill-color: transparent;
-     background-clip: text;
-     font-size: 24px;">P</span> and 
-     <span style="background-image: linear-gradient(to right, #00B4DB, #0083B0); 
-     -webkit-background-clip: text;
-     -webkit-text-fill-color: transparent;
-     background-clip: text;
-     font-size: 24px;">S</span> seismic phases along with <span style="background-image: linear-gradient(to right, #f12711, #f5af19); 
-     -webkit-background-clip: text;
-     -webkit-text-fill-color: transparent;
-     background-clip: text;
-     font-size: 24px;">
-     uncertainty
-     </span> of the detection.</p>
-     <ol>
-         <li>By selecting one of the sample waveforms.</li>
-         <li>By uploading your own waveform.</li>
-         <li>By selecting an earthquake from the global earthquake catalogue.</li>
-     </ol>
-     <p>Please upload your waveform in <code>.npy</code> (numpy) format.</p>
-     <p>Your waveform should be sampled at 100 samples per second and have 3 (Z, N, E) or 1 (Z) channels. 
-     If your file is longer than 60 seconds, the app will only use the first 60 seconds of the waveform.</p>
+     gr.HTML("""
+     <div style="padding: 20px; border-radius: 10px;">
+         <h1 style="font-size: 30px; text-align: center; margin-bottom: 20px;">PhaseHunter <span style="animation: arrow-anim 10s linear infinite; display: inline-block; transform: rotate(45deg) translateX(-20px);">🏹</span>
+
+     <style>
+         @keyframes arrow-anim {
+             0% { transform: translateX(-20px); }
+             50% { transform: translateX(20px); }
+             100% { transform: translateX(-20px); }
+         }
+     </style></h1>
+
+         <p style="font-size: 16px; margin-bottom: 20px;">Detect <span style="background-image: linear-gradient(to right, #ED213A, #93291E); 
+         -webkit-background-clip: text;
+         -webkit-text-fill-color: transparent;
+         background-clip: text;">P</span> and <span style="background-image: linear-gradient(to right, #00B4DB, #0083B0); 
+         -webkit-background-clip: text;
+         -webkit-text-fill-color: transparent;
+         background-clip: text;">S</span> seismic phases with <span style="background-image: linear-gradient(to right, #f12711, #f5af19); 
+         -webkit-background-clip: text;
+         -webkit-text-fill-color: transparent;
+         background-clip: text;">uncertainty</span></p>
+         <ul style="font-size: 16px; margin-bottom: 40px;">
+             <li>Detect seismic phases by selecting a sample waveform or uploading your own waveform in <code>.npy</code> format.</li>
+             <li>Select an earthquake from the global earthquake catalogue and PhaseHunter will analyze seismic stations in the given radius.</li>
+             <li>Waveforms should be sampled at 100 samples/sec and have 3 (Z, N, E) or 1 (Z) channels. PhaseHunter analyzes the first 6000 samples of your file.</li>
+         </ul>
+     </div>
     """)
+
     with gr.Tab("Try on a single station"):
         with gr.Row(): 
             # Define the input and output types for Gradio
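The `torch.no_grad()` → `torch.inference_mode()` swap in `make_prediction` is more than cosmetic: `inference_mode()` disables autograd's view and version-counter tracking in addition to gradient recording, which is slightly faster and appropriate for a serve-only app. A hedged usage sketch against the new TorchScript artifact (the waveform shape is again the assumed spec, not a repository fixture):

```python
import numpy as np
import torch

model = torch.jit.load("model.pt")

# Hypothetical 60 s, 3-channel (Z, N, E) record sampled at 100 sps,
# matching the input format the app's HTML describes.
waveform = np.random.randn(3, 6000).astype(np.float32)
batch = torch.from_numpy(waveform).unsqueeze(0)  # assumed (1, 3, 6000) layout

with torch.inference_mode():
    output = model(batch)

p_phase = output[:, 0]        # P-pick column, as in make_prediction above
print(p_phase.requires_grad)  # False: inference tensors never track grad
```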
weights.ckpt → model.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:80255c65b749559f7c5c3f2bb993a25cc666d9a63a0d3050024679dd8064dcec
- size 200977197
+ oid sha256:d783548eb6c267cac43d59a4a6e53298b1667242bbb2009ee22f3b9e57b7dd47
+ size 68468411
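The artifact shrank from 200,977,197 to 68,468,411 bytes with the rename, which is consistent with the TorchScript export carrying only frozen weights rather than the full Lightning checkpoint (checkpoints typically also store optimizer and training state). A quick smoke test for the committed file (a sketch; run from the repo root after `git lfs pull`):

```python
import os
import torch

# Verify the LFS object resolved and matches the pointer above.
assert os.path.getsize("model.pt") == 68468411

# Confirm the scripted model deserializes on CPU.
model = torch.jit.load("model.pt", map_location="cpu")
model.eval()
```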