acecalisto3 committed on
Commit c6d7c50
1 Parent(s): 53b8392

Update app.py

Files changed (1)
  1. app.py +37 -4
app.py CHANGED
@@ -5,13 +5,12 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoMode
 import black
 from pylint import lint
 from io import StringIO
-import openai
 import sys
 import torch
 from huggingface_hub import hf_hub_url, cached_download, HfApi
 
-# Set your OpenAI API key here
-openai.api_key = "YOUR_OPENAI_API_KEY"
+# Set your Hugging Face API key here
+hf_token = "YOUR_HUGGING_FACE_API_KEY" # Replace with your actual token
 
 HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
 PROJECT_ROOT = "projects"
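
The new hf_token line expects the real token to be pasted into app.py, which puts the secret in source control. A minimal sketch of the usual alternative, assuming the token is exported as an environment variable named HF_TOKEN (a placeholder name, not something this commit defines):

    import os

    # Read the Hugging Face token from the environment instead of hardcoding it.
    # "HF_TOKEN" is an assumed variable name; match whatever you configure in the
    # Space's secrets or your shell profile.
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        raise RuntimeError("Set the HF_TOKEN environment variable before launching the app.")
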
@@ -41,6 +40,36 @@ AVAILABLE_CODE_GENERATIVE_MODELS = [
 "facebook/bart-large-cnn", # Good for text-to-code tasks
 ]
 
+# Load pre-trained RAG retriever
+rag_retriever = RagRetriever.from_pretrained("facebook/rag-token-base") # Use a Hugging Face RAG model
+
+# Load pre-trained chat model
+chat_model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium") # Use a Hugging Face chat model
+
+# Load tokenizer
+tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
+def process_input(user_input):
+    # Input pipeline: Tokenize and preprocess user input
+    input_ids = tokenizer(user_input, return_tensors="pt").input_ids
+    attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
+
+    # RAG model: Generate response
+    with torch.no_grad():
+        output = rag_retriever(input_ids, attention_mask=attention_mask)
+        response = output.generator_outputs[0].sequences[0]
+
+    # Chat model: Refine response
+    chat_input = tokenizer(response, return_tensors="pt")
+    chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
+    chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
+    with torch.no_grad():
+        chat_output = chat_model(**chat_input)
+        refined_response = chat_output.sequences[0]
+
+    # Output pipeline: Return final response
+    return refined_response
+
 class AIAgent:
     def __init__(self, name, description, skills):
         self.name = name
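
As committed, this block mixes APIs that do not compose: RagRetriever is a retrieval component rather than a callable generator, and DialoGPT is a causal language model that AutoModelForSeq2SeqLM cannot load. A hedged sketch of the same input → retrieve-and-generate → chat-refine → output pipeline using transformers calls that do exist; the facebook/rag-token-nq checkpoint and the dummy retrieval index are stand-ins chosen only to keep the example self-contained (RAG retrieval additionally requires the datasets and faiss packages):

    import torch
    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        RagRetriever,
        RagTokenForGeneration,
        RagTokenizer,
    )

    # RAG components: the tokenizer, retriever, and generator share one checkpoint.
    rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    rag_retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
    )
    rag_model = RagTokenForGeneration.from_pretrained(
        "facebook/rag-token-nq", retriever=rag_retriever
    )

    # DialoGPT is GPT-2 based, so it loads through the causal-LM head.
    chat_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
    chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

    def process_input(user_input: str) -> str:
        # Input pipeline: tokenize the question for the RAG generator.
        rag_inputs = rag_tokenizer(user_input, return_tensors="pt")

        # RAG model: retrieve supporting passages and generate a draft answer.
        with torch.no_grad():
            draft_ids = rag_model.generate(input_ids=rag_inputs["input_ids"])
        draft = rag_tokenizer.batch_decode(draft_ids, skip_special_tokens=True)[0]

        # Chat model: let DialoGPT restate the draft as a conversational reply.
        chat_ids = chat_tokenizer(
            draft + chat_tokenizer.eos_token, return_tensors="pt"
        ).input_ids
        with torch.no_grad():
            chat_out = chat_model.generate(
                chat_ids, max_length=256, pad_token_id=chat_tokenizer.eos_token_id
            )

        # Output pipeline: decode only the newly generated tokens.
        return chat_tokenizer.decode(
            chat_out[0, chat_ids.shape[-1]:], skip_special_tokens=True
        )
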
 
@@ -430,4 +459,8 @@ elif app_mode == "Workspace Chat App":
 
     # Use the hf_token to interact with the Hugging Face API
     api = HfApi(token=hf_token)
-    # ... (your logic to deploy the Space using the API)
+    # Function to create a Space on Hugging Face
+    def create_space(api, name, description, public, files, entrypoint="launch.py"):
+        url = f"{hf_hub_url()}spaces/{name}/prepare-repo"
+        headers = {"Authorization": f"Bearer {api.access_token}"}
+
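
The added create_space lines assemble the prepare-repo URL and auth header by hand; huggingface_hub also exposes Space creation directly through HfApi. A sketch of that route, using HfApi.create_repo and HfApi.upload_file (the Gradio SDK choice, repo id, and file mapping below are assumptions, not values from this commit):

    from huggingface_hub import HfApi

    def create_space(api: HfApi, repo_id: str, files: dict, private: bool = False) -> str:
        """Create (or reuse) a Space and upload its files.

        repo_id is "<username>/<space-name>"; files maps repo paths to local paths.
        """
        # repo_type="space" provisions a Space; space_sdk selects the runtime.
        api.create_repo(
            repo_id=repo_id,
            repo_type="space",
            space_sdk="gradio",
            private=private,
            exist_ok=True,
        )
        # Push each file into the Space repository.
        for path_in_repo, local_path in files.items():
            api.upload_file(
                path_or_fileobj=local_path,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type="space",
            )
        return f"https://huggingface.co/spaces/{repo_id}"

    # Hypothetical usage:
    # api = HfApi(token=hf_token)
    # create_space(api, "your-username/your-space", {"app.py": "app.py"})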