garvitcpp commited on
Commit
fc1da4f
·
verified ·
1 Parent(s): ea4845b

Update app/api/v1/chat.py

Browse files
Files changed (1) hide show
  1. app/api/v1/chat.py +30 -5
app/api/v1/chat.py CHANGED
@@ -109,13 +109,38 @@ async def chat_with_repository(
109
  success=False
110
  )
111
 
112
 - logger.info(f"✅ Found {len(similar_chunks)} relevant code chunks")
113
 
114
- # Generate AI response
115
 - logger.info(f"🤖 Generating AI response with Gemini...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  ai_response = await chat_service.generate_response(
117
  query=request.query,
118
- code_chunks=similar_chunks,
119
  repository_name=repository.name
120
  )
121
 
@@ -166,7 +191,7 @@ async def chat_with_repository(
166
  response=ai_response['response'],
167
  sources=ai_response['sources'],
168
  repository_name=repository.name,
169
- context_chunks_used=len(similar_chunks),
170
  model_used=ai_response['model_used'],
171
  success=ai_response['success']
172
  )
 
109
  success=False
110
  )
111
 
112
 + logger.info(f"✅ Found {len(similar_chunks)} relevant chunk identifiers from Pinecone")
113
 
114
+ # Fetch full content from PostgreSQL
115
 + logger.info(f"📖 Fetching full code content from PostgreSQL...")
116
+ from app.models.code_file import CodeFile
117
+
118
+ full_chunks = []
119
+ for chunk_meta in similar_chunks:
120
+ # Query database for full content
121
+ code_file = db.query(CodeFile).filter(
122
+ CodeFile.repository_id == request.repository_id,
123
+ CodeFile.file_path == chunk_meta['file_path'],
124
+ CodeFile.chunk_index == chunk_meta['chunk_index']
125
+ ).first()
126
+
127
+ if code_file:
128
+ full_chunks.append({
129
+ 'file_path': code_file.file_path,
130
+ 'content': code_file.full_content, # FULL CONTENT from PostgreSQL!
131
+ 'start_line': code_file.start_line,
132
+ 'end_line': code_file.end_line,
133
+ 'chunk_type': code_file.chunk_type,
134
+ 'similarity': chunk_meta['similarity']
135
+ })
136
+
137
 + logger.info(f"✅ Retrieved {len(full_chunks)} complete code chunks from database")
138
+
139
+ # Generate AI response with FULL content
140
 + logger.info(f"🤖 Generating AI response with Gemini using complete code...")
141
  ai_response = await chat_service.generate_response(
142
  query=request.query,
143
+ code_chunks=full_chunks, # Use full_chunks instead of similar_chunks
144
  repository_name=repository.name
145
  )
146
 
 
191
  response=ai_response['response'],
192
  sources=ai_response['sources'],
193
  repository_name=repository.name,
194
+ context_chunks_used=len(full_chunks), # Use full_chunks count
195
  model_used=ai_response['model_used'],
196
  success=ai_response['success']
197
  )