Testing the LLM pipeline
README.md
CHANGED
@@ -46,7 +46,7 @@ For specific library versions, please refer to the `requirements.txt` file.
 3. Execute the script:
 
 ```bash
-python3
+python3 appy.py
 ```
 
 ### Usage
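Note that the `appy.py` referenced here is a Streamlit app (it builds its UI with `st.sidebar`), so if `python3 appy.py` exits without serving the UI, the conventional launch command would be the following (an assumption about local usage, not something this commit specifies):

```bash
streamlit run appy.py
```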
app.py
CHANGED
@@ -1,62 +1,25 @@
-
-# -*- coding: utf-8 -*-
-
-import streamlit as st
-import os
-from trainer import train
-from tester import test
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import transformers
-
+import torch
+
+model = "tiiuae/falcon-7b-instruct"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# # jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
-# # channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
-# #
-# # st.sidebar.subheader("Configuration:")
-# # st.sidebar.write(f"Jammer Type: {jammer_type}")
-# # st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
-# #
-# # start_button = st.sidebar.button('Start')
-# #
-# # if start_button:
-# #     agent = perform_training(jammer_type, channel_switching_cost)
-# #     st.subheader("Generating Insights of the DRL-Training")
-# #     model_name = "tiiuae/falcon-7b-instruct"
-# #     model = TFAutoModelForCausalLM.from_pretrained(model_name)
-# #     tokenizer = AutoTokenizer.from_pretrained(model_name)
-# #     pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
-# #                                      temperature=0.7)
-# #     text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
-# #     st.write(text)
-# #     test(agent, jammer_type, channel_switching_cost)
-#
-#
-# def perform_training(jammer_type, channel_switching_cost):
-#     agent = train(jammer_type, channel_switching_cost)
-#     return agent
-#
-#
-# def perform_testing(agent, jammer_type, channel_switching_cost):
-#     test(agent, jammer_type, channel_switching_cost)
-#
-#
-# if __name__ == "__main__":
-#     main()
+tokenizer = AutoTokenizer.from_pretrained(model)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+    device_map="auto",
+)
+sequences = pipeline(
+    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
+    max_length=200,
+    do_sample=True,
+    top_k=10,
+    num_return_sequences=1,
+    eos_token_id=tokenizer.eos_token_id,
+)
+for seq in sequences:
+    print(f"Result: {seq['generated_text']}")
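The rewritten `app.py` follows the standard Falcon-7B-Instruct text-generation pipeline usage; loading the 7B checkpoint requires a large download and substantial memory. A minimal sketch for verifying the same pipeline wiring locally, assuming `gpt2` as an illustrative small stand-in model (not part of this Space):

```python
# Smoke-test sketch: same pipeline wiring as app.py, with "gpt2" as an
# assumed small stand-in model so the flow can run on a laptop.
import transformers
from transformers import AutoTokenizer

model = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer)
sequences = pipeline(
    "Girafatron is obsessed with giraffes.",
    max_length=50,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
for seq in sequences:
    # Each item carries the prompt plus the sampled continuation.
    print(f"Result: {seq['generated_text']}")
```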
appy.py
ADDED
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import streamlit as st
+import os
+from trainer import train
+from tester import test
+import transformers
+from transformers import TFAutoModelForCausalLM, AutoTokenizer
+
+
+def main():
+    st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
+
+    st.sidebar.header("Make Your Environment Configuration")
+    mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
+
+    if mode == "Auto":
+        jammer_type = "dynamic"
+        channel_switching_cost = 0.1
+    else:
+        jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
+        channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
+
+    st.sidebar.subheader("Configuration:")
+    st.sidebar.write(f"Jammer Type: {jammer_type}")
+    st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
+
+    start_button = st.sidebar.button('Start')
+
+    if start_button:
+        agent = perform_training(jammer_type, channel_switching_cost)
+        st.subheader("Generating Insights of the DRL-Training")
+        model_name = "tiiuae/falcon-7b-instruct"
+        model = TFAutoModelForCausalLM.from_pretrained(model_name)
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+                                         temperature=0.7)
+        text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+        st.write(text)
+        test(agent, jammer_type, channel_switching_cost)
+
+
+def perform_training(jammer_type, channel_switching_cost):
+    agent = train(jammer_type, channel_switching_cost)
+    return agent
+
+
+def perform_testing(agent, jammer_type, channel_switching_cost):
+    test(agent, jammer_type, channel_switching_cost)
+
+
+if __name__ == "__main__":
+    main()
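`appy.py` keeps the earlier `TFAutoModelForCausalLM` path for the insight-generation step, but `tiiuae/falcon-7b-instruct` appears to ship only PyTorch weights, so the TensorFlow class is unlikely to load it. A hedged sketch of how that step could reuse the PyTorch pipeline settings from `app.py` instead (the `generate_insight` helper is hypothetical, not part of this commit):

```python
# Hypothetical helper (not in this commit): produce the DRL-training insight
# with the same PyTorch pipeline configuration that app.py uses.
import torch
import transformers
from transformers import AutoTokenizer

def generate_insight(prompt, model_name="tiiuae/falcon-7b-instruct"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    pipe = transformers.pipeline(
        "text-generation",
        model=model_name,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        device_map="auto",
    )
    outputs = pipe(prompt, max_length=100, do_sample=True, temperature=0.7)
    return outputs[0]["generated_text"]

# Inside the `if start_button:` block this could replace the TF-based lines, e.g.:
# st.write(generate_insight("Discuss this topic: Integrating LLMs to DRL-based anti-jamming."))
```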
requirements.txt
CHANGED
@@ -4,4 +4,5 @@ matplotlib
 gym
 streamlit
 transformers
+torch
 
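One dependency caveat (an assumption, not addressed by this commit): `device_map="auto"` in the new `app.py` relies on the `accelerate` package, which the listed requirements may not pull in on their own, so it might need to be installed alongside `torch`:

```bash
pip install -r requirements.txt
pip install accelerate  # assumption: needed for device_map="auto" in app.py
```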