oucgc1996 committed
Commit f19339e · verified · 1 Parent(s): 1efc8c1

Update app.py

Files changed (1)
  app.py  +14  -18
app.py CHANGED
@@ -48,33 +48,30 @@ def Kmers_funct(seq,num):
     return l
 
 def ACE(file):
+    test_seq = file
+    all = []
+    for j in range(2, 11):
+        X = Kmers_funct([test_seq], j)
+        all.extend(X)
     model = MyModel()
     model.load_state_dict(torch.load("best_model.pth", map_location=torch.device('cpu')), strict=False)
     model = model.to(device)
     model.eval()
     tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
-    test_sequences = file
     max_len = 30
-    test_data = tokenizer(test_sequences, max_length=max_len, padding="max_length",truncation=True, return_tensors='pt')
-    out_probability = []
-    with torch.no_grad():
-        predict = model(test_data)
-        out_probability.extend(np.max(np.array(predict.cpu()),axis=1).tolist())
-        test_argmax = np.argmax(predict.cpu(), axis=1).tolist()
-    id2str = {0:"non-ACE", 1:"ACE"}
-    return id2str[test_argmax[0]], out_probability[0]
 
-def main(file):
-    test_seq = file
-    all = []
     seq_all = []
     output_all = []
     probability_all = []
-    for j in range(2, 11):
-        X = Kmers_funct([test_seq], j)
-        all.extend(X)
     for seq in all:
-        output, probability = ACE(str(seq))
+        test_data = tokenizer(seq, max_length=max_len, padding="max_length",truncation=True, return_tensors='pt')
+        out_probability = []
+        predict = model(test_data)
+        out_probability.extend(np.max(np.array(predict.cpu()),axis=1).tolist())
+        test_argmax = np.argmax(predict.cpu(), axis=1).tolist()
+        id2str = {0:"non-ACE", 1:"ACE"}
+        output = id2str[test_argmax[0]]
+        probability = out_probability[0]
         seq_all.append(seq)
         output_all.append(output)
         probability_all.append(probability)
@@ -87,8 +84,7 @@ def main(file):
     summary_df.to_csv('output.csv', index=False)
     return 'outputs.csv'
 
-
-iface = gr.Interface(fn=main,
+iface = gr.Interface(fn=ACE,
   inputs="text",
   outputs= "file")
 iface.launch()
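
For readability, here is a minimal sketch of the post-commit flow, merged from the two hunks above. It is not a verbatim copy of app.py: the definitions of MyModel, Kmers_funct, model_checkpoint, and device come from the unchanged parts of the file, the summary_df column names are an assumption (those lines fall outside the hunks), the k-mer list is renamed from all to kmers to avoid shadowing the builtin, a torch.no_grad() guard is added so the output tensor can be converted to NumPy, and the returned path matches the written file (the committed code returns 'outputs.csv' while writing 'output.csv').

import numpy as np
import pandas as pd
import torch
import gradio as gr
from transformers import AutoTokenizer

def ACE(file):
    # Split the input peptide into overlapping k-mers, k = 2..10 (as Kmers_funct does in app.py).
    test_seq = file
    kmers = []
    for j in range(2, 11):
        kmers.extend(Kmers_funct([test_seq], j))

    # Load the fine-tuned classifier and tokenizer once per request.
    model = MyModel()
    model.load_state_dict(torch.load("best_model.pth", map_location=torch.device('cpu')), strict=False)
    model = model.to(device)
    model.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    max_len = 30

    seq_all, output_all, probability_all = [], [], []
    id2str = {0: "non-ACE", 1: "ACE"}
    for seq in kmers:
        test_data = tokenizer(seq, max_length=max_len, padding="max_length",
                              truncation=True, return_tensors='pt')
        with torch.no_grad():  # inference only; allows the NumPy conversion below
            predict = model(test_data)
        probs = np.array(predict.cpu())
        output = id2str[int(np.argmax(probs, axis=1)[0])]
        probability = float(np.max(probs, axis=1)[0])
        seq_all.append(seq)
        output_all.append(output)
        probability_all.append(probability)

    # Assumed column layout; the actual DataFrame construction lives in the unchanged lines of app.py.
    summary_df = pd.DataFrame({"sequence": seq_all,
                               "prediction": output_all,
                               "probability": probability_all})
    summary_df.to_csv('output.csv', index=False)
    return 'output.csv'  # hand the written CSV back to Gradio's file output

iface = gr.Interface(fn=ACE, inputs="text", outputs="file")
iface.launch()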