andreska commited on
Commit
1f53559
·
verified ·
1 Parent(s): 99ec9bc

Update to use Inference API with Qwen

Browse files
Files changed (1) hide show
  1. app.py +15 -5
app.py CHANGED
@@ -1,7 +1,17 @@
1
- from transformers import pipeline
 
2
 
3
  def analyze_project(project_data, question):
4
- nlp = pipeline("text-generation", model="gpt2")
5
- prompt = f"Analyze this project: {project_data}\n\nQuestion: {question}"
6
- output = nlp(prompt, max_length=50, num_return_sequences=1)
7
- return output[0]['generated_text']
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from huggingface_hub import InferenceClient
3
 
4
def analyze_project(project_data, question):
    """Answer a question about a project using a hosted Qwen model.

    Builds a prompt from the project description and the question, then
    queries the Hugging Face Inference API.

    Parameters
    ----------
    project_data : str
        Free-text description of the project to analyze.
    question : str
        The question to answer about the project.

    Returns
    -------
    str
        The model's generated answer text.
    """
    # Token is read from the environment; InferenceClient sends it as the
    # Authorization header. If HF_API_KEY is unset this falls back to an
    # unauthenticated (rate-limited) request rather than crashing here.
    api_key = os.getenv("HF_API_KEY")
    client = InferenceClient(api_key=api_key)

    prompt = f"Analyze this project: {project_data}\n\nQuestion: {question}"

    # InferenceClient exposes no `encoding()` or `generate()` methods — the
    # original code raised AttributeError. text_generation() takes the raw
    # prompt string and returns the generated text as a plain str, so no
    # ["generated_text"][0] unpacking (which would have yielded one char).
    return client.text_generation(
        prompt,
        model="Qwen/Qwen2.5-72B-Instruct",
        max_new_tokens=100,
    )