Samarthrr committed on
Commit
0cb2e88
·
verified ·
1 Parent(s): 96d9e55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -32
app.py CHANGED
@@ -10,7 +10,13 @@ import os
10
  app = FastAPI(title="Revcode AI Unified Orchestrator")
11
 
12
  # ---------------------------------------------------------
13
- # 1. SECURITY GUARDIAN (DistilBERT)
 
 
 
 
 
 
14
  # ---------------------------------------------------------
15
  class SecurityClassifier(nn.Module):
16
  def __init__(self):
@@ -25,7 +31,7 @@ class SecurityClassifier(nn.Module):
25
  return self.classifier(outputs.last_hidden_state[:, 0, :])
26
 
27
  # ---------------------------------------------------------
28
- # 2. ARCHITECTURAL GUARDRAILS
29
  # ---------------------------------------------------------
30
  class Guardrails:
31
  @staticmethod
@@ -54,25 +60,11 @@ class Guardrails:
54
  except Exception as e:
55
  return False, f"Syntax analysis failed: {str(e)}"
56
 
57
- # ... (rest of models and load functions remain same)
58
-
59
- @app.post("/verify")
60
- async def verify_fix(data: dict):
61
- # Specialized verification endpoint for external engines
62
- code = data.get("code", "")
63
- is_valid, msg = Guardrails.validate(code)
64
- return {
65
- "is_valid": is_valid,
66
- "message": msg,
67
- "status": "PASSED" if is_valid else "WARNING"
68
- }
69
-
70
- @app.post("/fix")
71
- async def fix_code(data: CodeInput):
72
- model, tokenizer = load_fixer()
73
-
74
- suggestion = data.code
75
- # ... (existing fix code)
76
 
77
  models = {
78
  "fixer": None,
@@ -104,17 +96,17 @@ def load_security():
104
  return models["security"], models["tokenizers"].get("security")
105
 
106
  # ---------------------------------------------------------
107
- # 4. API ENDPOINTS
108
  # ---------------------------------------------------------
109
- class CodeInput(BaseModel):
110
- code: str
 
111
 
112
  @app.post("/analyze")
113
  async def analyze_security(data: CodeInput):
114
  model, tokenizer = load_security()
115
 
116
  if model == "HEURISTIC":
117
- # Rule-based fallback for security
118
  is_vulnerable = "eval(" in data.code or "innerHTML" in data.code
119
  return {
120
  "is_vulnerable": is_vulnerable,
@@ -141,7 +133,7 @@ async def fix_code(data: CodeInput):
141
  model, tokenizer = load_fixer()
142
 
143
  suggestion = data.code
144
- if model == "RULE_ENGINE":
145
  # Advanced Rule-based correction
146
  suggestion = data.code.replace("eval(", "JSON.parse(").replace("console.log(", "// logger.info(")
147
  status = "PASSED"
@@ -163,15 +155,19 @@ async def fix_code(data: CodeInput):
163
  "guardrail_msg": msg
164
  }
165
 
 
 
 
 
 
 
 
 
 
 
166
  @app.post("/feedback")
167
  async def store_feedback(data: dict):
168
- # Store feedback for HITL (Human-In-The-Loop)
169
- # columns: original_code, corrected_code
170
  feedback_file = "feedback_dataset.csv"
171
  df = pd.DataFrame([data])
172
  df.to_csv(feedback_file, mode='a', header=not os.path.exists(feedback_file), index=False)
173
  return {"status": "Feedback stored for retraining"}
174
-
175
- @app.get("/")
176
- async def health():
177
- return {"status": "Revcode AI Engine is alive", "models_loaded": list(models.keys())}
 
10
  app = FastAPI(title="Revcode AI Unified Orchestrator")
11
 
12
  # ---------------------------------------------------------
13
+ # 1. DATA MODELS
14
+ # ---------------------------------------------------------
15
class CodeInput(BaseModel):
    """Request payload carrying a single snippet of source code."""

    # Raw source text to analyze, fix, or verify.
    code: str
17
+
18
+ # ---------------------------------------------------------
19
+ # 2. SECURITY GUARDIAN (DistilBERT)
20
  # ---------------------------------------------------------
21
  class SecurityClassifier(nn.Module):
22
  def __init__(self):
 
31
  return self.classifier(outputs.last_hidden_state[:, 0, :])
32
 
33
  # ---------------------------------------------------------
34
+ # 3. ARCHITECTURAL GUARDRAILS
35
  # ---------------------------------------------------------
36
  class Guardrails:
37
  @staticmethod
 
60
  except Exception as e:
61
  return False, f"Syntax analysis failed: {str(e)}"
62
 
63
# ---------------------------------------------------------
# 4. GLOBAL MODEL HANDLERS (Lazy Loading)
# ---------------------------------------------------------
# Hugging Face model identifiers, resolved lazily by the load_* helpers.
FIXER_MODEL = "Salesforce/codet5p-220m"      # seq2seq model used by /fix
SECURITY_MODEL = "distilbert-base-uncased"   # backbone for SecurityClassifier
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  models = {
70
  "fixer": None,
 
96
  return models["security"], models["tokenizers"].get("security")
97
 
98
  # ---------------------------------------------------------
99
+ # 5. API ENDPOINTS
100
  # ---------------------------------------------------------
101
@app.get("/")
async def health():
    """Liveness probe: report service status and the registered model slots."""
    loaded = list(models.keys())
    return {
        "status": "Revcode AI Engine is alive",
        "models_loaded": loaded,
    }
104
 
105
  @app.post("/analyze")
106
  async def analyze_security(data: CodeInput):
107
  model, tokenizer = load_security()
108
 
109
  if model == "HEURISTIC":
 
110
  is_vulnerable = "eval(" in data.code or "innerHTML" in data.code
111
  return {
112
  "is_vulnerable": is_vulnerable,
 
133
  model, tokenizer = load_fixer()
134
 
135
  suggestion = data.code
136
+ if model == "RULE_ENGINE" or not model:
137
  # Advanced Rule-based correction
138
  suggestion = data.code.replace("eval(", "JSON.parse(").replace("console.log(", "// logger.info(")
139
  status = "PASSED"
 
155
  "guardrail_msg": msg
156
  }
157
 
158
@app.post("/verify")
async def verify_fix(data: CodeInput):
    """Run the architectural guardrails over submitted code.

    Standalone verification endpoint so external engines can validate a
    candidate fix without going through the full /fix pipeline.
    """
    ok, detail = Guardrails.validate(data.code)
    if ok:
        verdict = "PASSED"
    else:
        verdict = "WARNING"
    return {"is_valid": ok, "message": detail, "status": verdict}
167
+
168
@app.post("/feedback")
async def store_feedback(data: dict):
    """Append one human-in-the-loop feedback record to the retraining dataset.

    NOTE(review): rows are appended as-is; if callers send dicts with
    differing keys the CSV columns will drift — presumably the expected
    schema is original_code / corrected_code, confirm with the caller.
    """
    path = "feedback_dataset.csv"
    # Emit the header row only when the dataset file is first created.
    first_write = not os.path.exists(path)
    pd.DataFrame([data]).to_csv(path, mode='a', header=first_write, index=False)
    return {"status": "Feedback stored for retraining"}