import time

import openai
import requests
from typing import Any, Dict, List

from config import JavaBugDetectorConfig

class JavaBugDetector:
    """AI-assisted bug detector for Java source code.

    Wraps an OpenAI-compatible chat-completions client (OpenAI or Moonshot
    Kimi, selected by ``JavaBugDetectorConfig``) and turns a Java file plus
    optional quick-scan hints into a structured bug-analysis result with a
    coarse severity level.
    """

    def __init__(self):
        """Validate configuration and build the API client for the configured provider.

        Raises:
            ValueError: if the configured provider is not 'openai' or 'kimi'.
        """
        self.config = JavaBugDetectorConfig()
        self.config.validate_config()

        self.provider = self.config.get_current_provider()

        if self.provider == 'openai':
            self.client = openai.OpenAI(
                api_key=self.config.OPENAI_API_KEY,
                base_url=self.config.OPENAI_BASE_URL
            )
        elif self.provider == 'kimi':
            # Kimi exposes an OpenAI-compatible API, so the same SDK client is reused.
            self.client = openai.OpenAI(
                api_key=self.config.KIMI_API_KEY,
                base_url=self.config.KIMI_BASE_URL
            )
        else:
            # Fail fast: previously an unknown provider silently left self.client
            # unset, deferring the failure to an AttributeError on the first API
            # call. Message mirrors _get_model_name for consistency.
            raise ValueError(f"不支持的提供商: {self.provider}")

    def _get_model_name(self) -> str:
        """Return the model identifier for the active provider.

        Raises:
            ValueError: if self.provider is not a supported provider.
        """
        if self.provider == 'openai':
            return self.config.OPENAI_MODEL
        elif self.provider == 'kimi':
            return self.config.KIMI_MODEL
        else:
            raise ValueError(f"不支持的提供商: {self.provider}")

    def _get_system_prompt(self) -> str:
        """Return the system prompt, with Kimi-specific additions when applicable."""
        base_prompt = """你是一个专业的Java BUG检测专家，拥有丰富的Java开发和调试经验。
你的任务是仔细分析Java代码，识别各种类型的BUG，包括但不限于：
- 运行时异常风险
- 内存泄漏
- 线程安全问题
- 资源管理问题
- 逻辑错误
- 性能问题
- 安全漏洞

请提供详细、准确、可操作的BUG修复建议。"""

        if self.provider == 'kimi':
            # Kimi is strong at long-context analysis; steer it to use that.
            base_prompt += """

作为Kimi模型，你具有强大的长文本理解能力，请充分利用这个优势：
- 深入分析代码的上下文关系
- 识别跨方法、跨类的潜在问题
- 提供更全面的代码质量评估"""

        return base_prompt

    def detect_bugs(self, file_info: Dict[str, Any], code_content: str, quick_scan_results: Dict[str, int]) -> Dict[str, Any]:
        """Run AI bug detection on a single Java file.

        Args:
            file_info: file metadata; must contain 'relative_path', 'package'
                and 'size' keys.
            code_content: full Java source text of the file.
            quick_scan_results: pattern name -> occurrence count produced by
                the local pre-scan (may be empty).

        Returns:
            Result dict with 'status' set to 'success' or 'error'. API
            failures are reported in-band rather than raised, so batch runs
            can continue past a single bad file.
        """
        try:
            prompt = self.config.BUG_DETECTION_PROMPT.format(
                file_path=file_info['relative_path'],
                code_content=code_content
            )

            # Fold the quick-scan hints into the prompt so the model can
            # prioritise the locally detected suspects.
            if quick_scan_results:
                prompt += "\n\n## 预扫描发现的潜在问题:\n"
                for issue, count in quick_scan_results.items():
                    prompt += f"- {issue}: {count} 处\n"
                prompt += "\n请重点关注这些预扫描发现的问题。"

            # Kimi supports longer completions, so double the budget (capped).
            max_tokens = self.config.MAX_TOKENS
            if self.provider == 'kimi':
                max_tokens = min(max_tokens * 2, 8000)

            response = self.client.chat.completions.create(
                model=self._get_model_name(),
                messages=[
                    {
                        "role": "system",
                        "content": self._get_system_prompt()
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                max_tokens=max_tokens,
                temperature=self.config.TEMPERATURE,
                timeout=self.config.REQUEST_TIMEOUT
            )

            bug_analysis = response.choices[0].message.content

            # Derive a coarse severity bucket from the answer + pre-scan counts.
            severity = self._analyze_severity(bug_analysis, quick_scan_results)

            return {
                'file_path': file_info['relative_path'],
                'package': file_info['package'],
                'file_size': file_info['size'],
                'bug_analysis': bug_analysis,
                'quick_scan_results': quick_scan_results,
                'severity_level': severity,
                'ai_provider': self.provider,
                'model_name': self._get_model_name(),
                'status': 'success',
                'tokens_used': response.usage.total_tokens if response.usage else 0,
                'timestamp': time.time()
            }

        except Exception as e:
            # Deliberate best-effort catch-all: any API/network/parsing failure
            # becomes an 'error' record instead of aborting the whole batch.
            return {
                'file_path': file_info['relative_path'],
                'package': file_info['package'],
                'file_size': file_info['size'],
                'bug_analysis': f"BUG检测失败: {str(e)}",
                'quick_scan_results': quick_scan_results,
                'severity_level': 'unknown',
                'ai_provider': self.provider,
                'model_name': self._get_model_name(),
                'status': 'error',
                'tokens_used': 0,
                'timestamp': time.time()
            }

    def _analyze_severity(self, analysis: str, quick_scan: Dict[str, int]) -> str:
        """Classify a bug-analysis text as 'high', 'medium' or 'low' severity.

        Combines keyword hits in the AI response with high-risk pattern counts
        from the quick scan.
        """
        critical_keywords = ['严重BUG', '🔴', 'Critical', '空指针', '内存泄漏', 'SQL注入']
        major_keywords = ['重要BUG', '🟡', 'Major', '线程安全', '资源泄漏']

        analysis_lower = analysis.lower()

        # Case-insensitive keyword hit counts in the AI answer.
        critical_count = sum(1 for keyword in critical_keywords if keyword.lower() in analysis_lower)
        major_count = sum(1 for keyword in major_keywords if keyword.lower() in analysis_lower)

        # High-risk findings from the local pre-scan also raise severity.
        high_risk_patterns = ['SQL注入风险', '资源未关闭风险', '可能的空指针风险']
        quick_scan_risk = sum(quick_scan.get(pattern, 0) for pattern in high_risk_patterns)

        if critical_count > 0 or quick_scan_risk > 5:
            return 'high'
        elif major_count > 0 or quick_scan_risk > 2:
            return 'medium'
        else:
            return 'low'

    def batch_detect(self, files_data: List[tuple], progress_callback=None) -> List[Dict]:
        """Detect bugs in multiple Java files sequentially.

        Args:
            files_data: list of (file_info, code_content, quick_scan_results)
                tuples, as accepted by detect_bugs().
            progress_callback: optional callable invoked as
                callback(current_index_1based, total, relative_path).

        Returns:
            One result dict per input file, in input order.
        """
        results = []
        total_files = len(files_data)

        for i, (file_info, code_content, quick_scan_results) in enumerate(files_data):
            if progress_callback:
                progress_callback(i + 1, total_files, file_info['relative_path'])

            detection_result = self.detect_bugs(file_info, code_content, quick_scan_results)
            results.append(detection_result)

            # Per-provider throttling between requests to stay under rate limits.
            if self.provider == 'openai':
                time.sleep(0.5)  # OpenAI rate limits are comparatively strict
            elif self.provider == 'kimi':
                time.sleep(0.2)  # Kimi limits are comparatively lenient

        return results

    def test_connection(self) -> Dict[str, Any]:
        """Send a tiny prompt to verify the AI service is reachable.

        Returns:
            On success: dict with 'status' == 'success', the model's reply and
            token usage. On failure: dict with 'status' == 'error' and the
            exception text; never raises.
        """
        try:
            test_prompt = "请简单回复'连接测试成功'"

            response = self.client.chat.completions.create(
                model=self._get_model_name(),
                messages=[
                    {
                        "role": "user",
                        "content": test_prompt
                    }
                ],
                max_tokens=50,
                temperature=0.1
            )

            return {
                'status': 'success',
                'provider': self.provider,
                'model': self._get_model_name(),
                'response': response.choices[0].message.content,
                'tokens_used': response.usage.total_tokens if response.usage else 0
            }

        except Exception as e:
            return {
                'status': 'error',
                'provider': self.provider,
                'model': self._get_model_name(),
                'error': str(e)
            }
