uvpatel7271 committed on
Commit
5334008
·
1 Parent(s): 1278df1

winning commit — we are coming to Bangalore, be prepared, Meta

Browse files
Files changed (2) hide show
  1. pyproject.toml +9 -1
  2. services/analysis_service.py +12 -2
pyproject.toml CHANGED
@@ -14,6 +14,7 @@ dependencies = [
14
  "openai>=1.76.0",
15
  "openenv-core[core]>=0.2.2",
16
  "pytest>=8.0.0",
 
17
  "torch>=2.2.0",
18
  "transformers>=4.45.0",
19
  "uvicorn>=0.30.0",
@@ -34,5 +35,12 @@ packages = [
34
  "python_env.server",
35
  "python_env.tasks",
36
  "python_env.graders",
 
 
 
 
 
 
 
37
  ]
38
- package-dir = { "python_env" = ".", "python_env.server" = "server", "python_env.tasks" = "tasks", "python_env.graders" = "graders" }
 
14
  "openai>=1.76.0",
15
  "openenv-core[core]>=0.2.2",
16
  "pytest>=8.0.0",
17
+ "streamlit>=1.44.0",
18
  "torch>=2.2.0",
19
  "transformers>=4.45.0",
20
  "uvicorn>=0.30.0",
 
35
  "python_env.server",
36
  "python_env.tasks",
37
  "python_env.graders",
38
+ "python_env.api",
39
+ "python_env.app",
40
+ "python_env.analyzers",
41
+ "python_env.models",
42
+ "python_env.schemas",
43
+ "python_env.services",
44
+ "python_env.utils",
45
  ]
46
+ package-dir = { "python_env" = ".", "python_env.server" = "server", "python_env.tasks" = "tasks", "python_env.graders" = "graders", "python_env.api" = "api", "python_env.app" = "app", "python_env.analyzers" = "analyzers", "python_env.models" = "models", "python_env.schemas" = "schemas", "python_env.services" = "services", "python_env.utils" = "utils" }
services/analysis_service.py CHANGED
@@ -79,8 +79,18 @@ class AnalysisService:
79
  combined_scores[domain] = round((0.6 * model_score) + (0.4 * heuristic_score), 4)
80
 
81
  detected_domain = request.domain_hint if request.domain_hint != "auto" else max(combined_scores, key=combined_scores.get)
82
- analyzer = self._analyzers.get(detected_domain, analyze_dsa_code if detected_domain == "dsa" else analyze_web_code)
83
- domain_analysis = analyzer(request.code, parsed, complexity) if detected_domain in self._analyzers else DomainAnalysis(domain="general", domain_score=0.6, issues=[], suggestions=["Add stronger domain-specific context for deeper analysis."], highlights={})
 
 
 
 
 
 
 
 
 
 
84
 
85
  lint_score = _lint_score(parsed)
86
  score_breakdown = self.reward_service.compute(
 
79
  combined_scores[domain] = round((0.6 * model_score) + (0.4 * heuristic_score), 4)
80
 
81
  detected_domain = request.domain_hint if request.domain_hint != "auto" else max(combined_scores, key=combined_scores.get)
82
+ analyzer = self._analyzers.get(detected_domain)
83
+ domain_analysis = (
84
+ analyzer(request.code, parsed, complexity)
85
+ if analyzer is not None
86
+ else DomainAnalysis(
87
+ domain="general",
88
+ domain_score=0.6,
89
+ issues=[],
90
+ suggestions=["Add stronger domain-specific context for deeper analysis."],
91
+ highlights={},
92
+ )
93
+ )
94
 
95
  lint_score = _lint_score(parsed)
96
  score_breakdown = self.reward_service.compute(