Commit 3a53910 by asataura
Parent: 24b6434

Integrating the DRL and LLM

Files changed (4):
  1. README.md +1 -1
  2. app.py +47 -23
  3. appy.py +0 -54
  4. appyy.py +27 -0
README.md CHANGED
@@ -46,7 +46,7 @@ For specific library versions, please refer to the `requirements.txt` file.
 3. Execute the script:
 
 ```bash
-python3 appy.py
+python3 app.py
 ```
 
 ### Usage
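Note: app.py is a Streamlit app, so when running locally the usual entry point is `streamlit run app.py`; invoking `python3 app.py` directly executes the script but does not start Streamlit's server (on a Hugging Face Space, the platform launches the app itself).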
app.py CHANGED
@@ -1,27 +1,51 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import streamlit as st
+import os
+from trainer import train
+from tester import test
 import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-import streamlit as st
 
-model = "tiiuae/falcon-7b-instruct"
-
-tokenizer = AutoTokenizer.from_pretrained(model)
-pipeline = transformers.pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    torch_dtype=torch.bfloat16,
-    trust_remote_code=True,
-    device_map="auto",
-)
-sequences = pipeline(
-    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
-    max_length=200,
-    do_sample=True,
-    top_k=10,
-    num_return_sequences=1,
-    eos_token_id=tokenizer.eos_token_id,
-)
+
+def perform_training(jammer_type, channel_switching_cost):
+    agent = train(jammer_type, channel_switching_cost)
+    return agent
+
+
+def perform_testing(agent, jammer_type, channel_switching_cost):
+    test(agent, jammer_type, channel_switching_cost)
+
+
+model_name = "tiiuae/falcon-7b-instruct"
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+                                 temperature=0.7)
+
 st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
-for seq in sequences:
-    st.write(f"Result: {seq['generated_text']}")
+
+st.sidebar.header("Make Your Environment Configuration")
+mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
+
+if mode == "Auto":
+    jammer_type = "dynamic"
+    channel_switching_cost = 0.1
+else:
+    jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
+    channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
+
+st.sidebar.subheader("Configuration:")
+st.sidebar.write(f"Jammer Type: {jammer_type}")
+st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
+
+start_button = st.sidebar.button('Start')
+
+if start_button:
+    agent = perform_training(jammer_type, channel_switching_cost)
+    st.subheader("Generating Insights of the DRL-Training")
+    text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+    st.write(text)
+    test(agent, jammer_type, channel_switching_cost)
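The new app.py imports `train` from `trainer` and `test` from `tester`, neither of which is touched by this commit. For orientation, here is a minimal sketch of the interface app.py appears to assume; only the two signatures are implied by the diff, and the `Agent` class with its epsilon-greedy logic is an illustrative assumption, not the repository's actual DRL implementation:

```python
# Hypothetical sketch of the trainer/tester interface app.py imports.
# Only the two signatures are implied by the diff; the Agent class and
# its epsilon-greedy logic are illustrative guesses, not repo code.
import random


class Agent:
    """Placeholder anti-jamming agent: one value estimate per channel."""

    def __init__(self, n_channels=5, epsilon=0.1):
        self.q = [0.0] * n_channels
        self.epsilon = epsilon

    def act(self):
        # Epsilon-greedy channel selection.
        if random.random() < self.epsilon:
            return random.randrange(len(self.q))
        return max(range(len(self.q)), key=self.q.__getitem__)


def train(jammer_type, channel_switching_cost):
    """Train an agent against the given jammer type; app.py expects
    the trained agent back (`agent = train(...)`)."""
    agent = Agent()
    # ... environment rollouts would update agent.q here, penalising
    # each channel switch by channel_switching_cost ...
    return agent


def test(agent, jammer_type, channel_switching_cost):
    """Evaluate a trained agent; app.py calls this for its side
    effects (metrics/plots) and ignores the return value."""
    choices = [agent.act() for _ in range(100)]
    switches = sum(a != b for a, b in zip(choices, choices[1:]))
    print(f"{jammer_type}: {switches} switches at cost {channel_switching_cost}")
```

Note also that app.py defines `perform_testing` but calls `test` directly on its last line, and imports `os` without using it; both patterns carry over from the deleted appy.py below.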
appy.py DELETED
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import streamlit as st
-import os
-from trainer import train
-from tester import test
-import transformers
-from transformers import TFAutoModelForCausalLM, AutoTokenizer
-
-
-def main():
-    st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
-
-    st.sidebar.header("Make Your Environment Configuration")
-    mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
-
-    if mode == "Auto":
-        jammer_type = "dynamic"
-        channel_switching_cost = 0.1
-    else:
-        jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
-        channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
-
-    st.sidebar.subheader("Configuration:")
-    st.sidebar.write(f"Jammer Type: {jammer_type}")
-    st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
-
-    start_button = st.sidebar.button('Start')
-
-    if start_button:
-        agent = perform_training(jammer_type, channel_switching_cost)
-        st.subheader("Generating Insights of the DRL-Training")
-        model_name = "tiiuae/falcon-7b-instruct"
-        model = TFAutoModelForCausalLM.from_pretrained(model_name)
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-        pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
-                                         temperature=0.7)
-        text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
-        st.write(text)
-        test(agent, jammer_type, channel_switching_cost)
-
-
-def perform_training(jammer_type, channel_switching_cost):
-    agent = train(jammer_type, channel_switching_cost)
-    return agent
-
-
-def perform_testing(agent, jammer_type, channel_switching_cost):
-    test(agent, jammer_type, channel_switching_cost)
-
-
-if __name__ == "__main__":
-    main()
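Aside on the model-loading change: the deleted appy.py used `TFAutoModelForCausalLM`, the TensorFlow auto class, whereas the new app.py loads the model with the PyTorch `AutoModelForCausalLM` alongside `import torch`; that switch is consistent with the `tiiuae/falcon-7b-instruct` checkpoint, which, as published on the Hub, ships PyTorch weights.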
appyy.py ADDED
@@ -0,0 +1,27 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import transformers
+import torch
+import streamlit as st
+
+model = "tiiuae/falcon-7b-instruct"
+
+tokenizer = AutoTokenizer.from_pretrained(model)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+    device_map="auto",
+)
+sequences = pipeline(
+    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
+    max_length=200,
+    do_sample=True,
+    top_k=10,
+    num_return_sequences=1,
+    eos_token_id=tokenizer.eos_token_id,
+)
+st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
+for seq in sequences:
+    st.write(f"Result: {seq['generated_text']}")
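A closing note on the two `transformers.pipeline` calls in this commit: a text-generation pipeline returns a list of dicts keyed by `generated_text`, which is why appyy.py iterates over `sequences`, and sampling knobs such as `temperature` and `top_k` only take effect when `do_sample=True` is passed, as appyy.py does. app.py's call sets `temperature=0.7` without `do_sample`, so generation there stays greedy under the library's defaults. A minimal, self-contained sketch (using `gpt2` purely as a small stand-in for the Falcon checkpoint used in the diffs):

```python
# Minimal sketch of text-generation pipeline usage and output shape.
# "gpt2" is a small stand-in here; the commit uses tiiuae/falcon-7b-instruct.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Discuss this topic: Integrating LLMs to DRL-based anti-jamming.",
    max_length=60,
    do_sample=True,   # required for temperature/top_k to influence output
    temperature=0.7,
)
print(outputs[0]["generated_text"])  # list of dicts, as appyy.py assumes
```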