ldhldh committed on
Commit
8fefd32
β€’
1 Parent(s): 8cccf6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -7
app.py CHANGED
@@ -39,6 +39,17 @@ model_name = "daekeun-ml/Llama-2-ko-instruct-13B"
39
  tokenizer = AutoTokenizer.from_pretrained(model_name)
40
  model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
41
 
 
 
 
 
 
 
 
 
 
 
 
42
  def chat(id, npc, prompt):
43
 
44
  # get_coin endpoint
@@ -52,13 +63,13 @@ def chat(id, npc, prompt):
52
  return "no coin"
53
 
54
  # model inference
55
-
56
- prom = ""
57
- inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
58
- outputs = model.generate(inputs, max_new_tokens=100)
59
- print(tokenizer.decode(outputs[0]))
60
-
61
- output = "AI μ‘λ‹΅μž…λ‹ˆλ‹€."
62
 
63
 
64
  # add_transaction endpoint
 
39
  tokenizer = AutoTokenizer.from_pretrained(model_name)
40
  model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
41
 
42
+
43
+ def check(model_name):
44
+ data = requests.get("https://health.petals.dev/api/v1/state").json()
45
+ out = []
46
+ for d in data['model_reports']:
47
+ if d['name'] == model_name:
48
+ if d['state']=="healthy":
49
+ return True
50
+ return False
51
+
52
+
53
  def chat(id, npc, prompt):
54
 
55
  # get_coin endpoint
 
63
  return "no coin"
64
 
65
  # model inference
66
+ if check:
67
+ prom = ""
68
+ inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
69
+ outputs = model.generate(inputs, max_new_tokens=100)
70
+ print(tokenizer.decode(outputs[0]))
71
+ else:
72
+ output = "no model"
73
 
74
 
75
  # add_transaction endpoint