import os
import json
import re
from datetime import datetime
from collections import defaultdict
from dateutil.parser import parse
from git import Repo, GitCommandError
import javalang
from javalang.tree import (
    ClassDeclaration, InterfaceDeclaration, EnumDeclaration,
    MethodDeclaration, FieldDeclaration, ConstructorDeclaration
)
from javalang.parser import JavaSyntaxError
from tqdm import tqdm
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

# Configure process-wide logging for the preprocessor.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('JavaCodePreprocessor')

class GitRepoAnalyzer:
    """Analyzes Java source files inside a Git repository.

    Combines javalang structural parsing with GitPython commit history and
    per-line blame data, producing the analysis dicts consumed by
    CodeChunker and KnowledgeEntryGenerator.
    """

    def __init__(self, repo_path):
        """Open the repository that contains ``repo_path``.

        Args:
            repo_path: any path at or below the repository root.

        Raises:
            ValueError: if no ``.git`` directory is found at or above the path.
        """
        # Make sure we anchor at the repository root (the directory that
        # actually contains ``.git``), not an arbitrary subdirectory.
        self.repo_path = self.find_git_root(repo_path)
        if not self.repo_path:
            raise ValueError(f"No Git repository found at: {repo_path}")

        logger.info(f"Initializing Git repository at: {self.repo_path}")
        self.repo = Repo(self.repo_path)
        self.commits_cache = {}  # reserved for memoization; currently unused

    def find_git_root(self, start_path):
        """Walk upward from ``start_path`` until a ``.git`` directory is found.

        Returns:
            The repository root path, or None when the filesystem root is
            reached without finding one.

        NOTE(review): a ``.git`` *file* (worktrees, submodules) is not
        recognized — same behavior as before; confirm if those are needed.
        """
        current_path = os.path.abspath(start_path)

        while current_path != os.path.dirname(current_path):  # stop at filesystem root
            git_dir = os.path.join(current_path, '.git')
            if os.path.isdir(git_dir):  # isdir() implies existence
                return current_path
            current_path = os.path.dirname(current_path)

        return None

    def analyze_file(self, file_path):
        """Analyze a single Java file.

        Args:
            file_path: path relative to the repository root.

        Returns:
            dict with the source code, parsed structure, commit history and
            per-line blame; None when the file is missing, unreadable or
            fails to parse (all failures are logged, not raised).
        """
        full_path = os.path.join(self.repo_path, file_path)

        if not os.path.exists(full_path):
            logger.warning(f"File not found: {full_path}")
            return None

        # Read the file content.
        try:
            with open(full_path, 'r', encoding='utf-8') as f:
                source_code = f.read()
        except Exception as e:
            logger.error(f"Error reading file {full_path}: {str(e)}")
            return None

        # Parse the code structure; parse failures skip the file.
        try:
            code_structure = self.parse_code_structure(source_code, full_path)
            if not code_structure:
                logger.warning(f"Failed to parse code structure for {file_path}")
                return None
        except JavaSyntaxError as e:
            logger.error(f"Java syntax error in {file_path}: {e}")
            return None
        except Exception as e:
            logger.error(f"Error parsing {file_path}: {str(e)}")
            return None

        # Commit-level history for the whole file.
        file_history = self.get_file_history(file_path)
        if not file_history:
            logger.warning(f"No commit history found for {file_path}")

        # Blame-derived per-line history.
        line_history = self.get_line_history(file_path, source_code)
        if not line_history:
            logger.warning(f"No line history found for {file_path}")

        return {
            'file_path': file_path,
            'source_code': source_code,
            'code_structure': code_structure,
            'file_history': file_history,
            'line_history': line_history
        }

    def parse_code_structure(self, source_code, file_path):
        """Parse Java source with javalang into a flat structure dict.

        Args:
            source_code: full Java source text.
            file_path: kept for interface compatibility; not used here.

        Raises:
            JavaSyntaxError: propagated from javalang on invalid Java.
        """
        tree = javalang.parse.parse(source_code)
        structure = {
            'package': tree.package.name if tree.package else '',
            'classes': [],
            'interfaces': [],
            'enums': [],
            'methods': [],
            'constructors': [],
            'fields': [],
            'imports': [str(imp.path) for imp in tree.imports] if tree.imports else []
        }

        # Top-level type declarations.
        for type_decl in tree.types:
            if isinstance(type_decl, ClassDeclaration):
                structure['classes'].append(
                    self.extract_class_info(type_decl, source_code))
            elif isinstance(type_decl, InterfaceDeclaration):
                structure['interfaces'].append(
                    self.extract_interface_info(type_decl, source_code))
            elif isinstance(type_decl, EnumDeclaration):
                structure['enums'].append(
                    self.extract_enum_info(type_decl, source_code))

        # All methods, including those nested inside type bodies.
        for path, node in tree.filter(MethodDeclaration):
            structure['methods'].append(self.extract_method_info(node, source_code))

        # Constructors.
        for path, node in tree.filter(ConstructorDeclaration):
            structure['constructors'].append(
                self.extract_constructor_info(node, source_code))

        # Fields: one entry per declarator (``int a, b;`` yields two).
        for path, node in tree.filter(FieldDeclaration):
            for declarator in node.declarators:
                structure['fields'].append(
                    self.extract_field_info(node, declarator, source_code))

        return structure

    def extract_class_info(self, class_decl, source_code):
        """Summarize a class declaration."""
        return {
            'type': 'class',
            'name': class_decl.name,
            'modifiers': class_decl.modifiers or [],
            'line_range': self.get_node_line_range(class_decl, source_code),
            'methods': [],
            'fields': [],
            'constructors': []
        }

    def extract_interface_info(self, interface_decl, source_code):
        """Summarize an interface declaration."""
        return {
            'type': 'interface',
            'name': interface_decl.name,
            'modifiers': interface_decl.modifiers or [],
            'line_range': self.get_node_line_range(interface_decl, source_code),
            'methods': []
        }

    def extract_enum_info(self, enum_decl, source_code):
        """Summarize an enum declaration, including its constant names."""
        return {
            'type': 'enum',
            'name': enum_decl.name,
            'modifiers': enum_decl.modifiers or [],
            'line_range': self.get_node_line_range(enum_decl, source_code),
            'constants': [const.name for const in enum_decl.body.constants]
        }

    def extract_method_info(self, method_decl, source_code):
        """Summarize a method declaration (name, modifiers, signature parts)."""
        params = [{'type': str(param.type), 'name': param.name}
                  for param in method_decl.parameters]
        return_type = str(method_decl.return_type) if method_decl.return_type else 'void'

        return {
            'type': 'method',
            'name': method_decl.name,
            'modifiers': method_decl.modifiers or [],
            'return_type': return_type,
            'parameters': params,
            'line_range': self.get_node_line_range(method_decl, source_code)
        }

    def extract_constructor_info(self, constructor_decl, source_code):
        """Summarize a constructor declaration."""
        params = [{'type': str(param.type), 'name': param.name}
                  for param in constructor_decl.parameters]

        return {
            'type': 'constructor',
            'name': constructor_decl.name,
            'modifiers': constructor_decl.modifiers or [],
            'parameters': params,
            'line_range': self.get_node_line_range(constructor_decl, source_code)
        }

    def extract_field_info(self, field_decl, declarator, source_code):
        """Summarize one field declarator of a field declaration."""
        return {
            'type': 'field',
            'name': declarator.name,
            'modifiers': field_decl.modifiers or [],
            'field_type': str(field_decl.type),
            'line_range': self.get_node_line_range(field_decl, source_code)
        }

    def get_node_line_range(self, node, source_code):
        """Best-effort (start, end) line range for a javalang AST node.

        javalang does not record end positions, so the end line is estimated
        from the node's body, or from its declaration shape when no usable
        body is available.
        """
        start_line = node.position.line if node.position else 1

        # Some javalang variants attach an ``end`` position; trust it if present.
        if hasattr(node, 'end') and node.end:
            return (start_line, node.end.line)

        if hasattr(node, 'body'):
            body = node.body
            # Only list bodies (class/interface members, method statements)
            # can be indexed; EnumDeclaration.body is an EnumBody node and
            # previously crashed here.
            if isinstance(body, list) and body:
                last = body[-1]
                end_line = last.position.line if last.position else start_line + 5
                return (start_line, end_line)
            if not body:
                # Empty body: assume a couple of lines.
                return (start_line, start_line + 2)
            # Non-list body: fall through to the shape-based estimates below.

        # Method or field declarations without a usable body: rough estimate.
        if hasattr(node, 'parameters') or hasattr(node, 'declarators'):
            return (start_line, start_line + 3)

        # Default: a minimal two-line span.
        return (start_line, start_line + 1)

    @staticmethod
    def _commit_summary(commit):
        """Flatten a git commit object into the JSON-friendly dict used throughout."""
        return {
            'id': commit.hexsha,
            'author': commit.author.name,
            'email': commit.author.email,
            'timestamp': commit.authored_datetime.isoformat(),
            'message': commit.message.strip()
        }

    def get_file_history(self, file_path):
        """Return the file's commits as summary dicts, oldest first.

        Returns [] (and logs) on any git failure.
        """
        try:
            commits = list(self.repo.iter_commits(paths=file_path))
            if not commits:
                logger.warning(f"No commits found for file: {file_path}")
                return []

            history = [self._commit_summary(commit) for commit in commits]

            # iter_commits yields newest first; present oldest-to-newest.
            # Timestamps were produced by isoformat(), so the stdlib parser
            # round-trips them without needing dateutil.
            history.sort(key=lambda c: datetime.fromisoformat(c['timestamp']))

            return history
        except GitCommandError as e:
            logger.error(f"Git command error for {file_path}: {str(e)}")
            return []
        except Exception as e:
            logger.error(f"Error getting history for {file_path}: {str(e)}")
            return []

    def get_line_history(self, file_path, source_code):
        """Map each line number (1-based) to the commit that last touched it.

        Fix: ``repo.blame()`` returns one entry per *run* of consecutive
        lines, shaped ``(commit, [line, ...])``. The previous implementation
        numbered the entries themselves, so line numbers drifted whenever a
        commit covered more than one line.

        Args:
            file_path: path relative to the repository root.
            source_code: kept for interface compatibility; not used.

        Returns:
            dict of {line_number: commit summary dict}; {} on git failure.
        """
        try:
            line_history = {}
            next_line = 1  # running counter for plain (commit, lines) entries

            for entry in self.repo.blame('HEAD', file_path):
                if hasattr(entry, 'commit'):
                    # BlameEntry (blame_incremental): carries explicit line numbers.
                    info = self._commit_summary(entry.commit)
                    for lineno in entry.linenos:
                        line_history[lineno] = info
                else:
                    # Plain blame entry: (commit, [line, ...]) covering a run
                    # of consecutive lines starting at ``next_line``.
                    commit, blamed_lines = entry
                    info = self._commit_summary(commit)
                    for _ in blamed_lines:
                        line_history[next_line] = info
                        next_line += 1

            return line_history
        except GitCommandError as e:
            logger.error(f"Git blame error for {file_path}: {str(e)}")
            return {}
        except Exception as e:
            logger.error(f"Error getting line history for {file_path}: {str(e)}")
            return {}

class CodeChunker:
    """Splits a file analysis result into logical, line-ranged chunks."""

    @staticmethod
    def chunkify(analysis_result):
        """Decompose an analysis result into sorted, non-prose code chunks.

        Declarations (types, methods, constructors, fields) each become one
        chunk; any source lines not claimed by a declaration (package line,
        imports, stray code) are grouped into general chunks.
        """
        structure = analysis_result['code_structure']
        chunks = []

        # 1. Top-level type declarations.
        for cls in structure['classes']:
            chunks.append(CodeChunker.create_class_chunk(cls))
        for interface in structure['interfaces']:
            chunks.append(CodeChunker.create_interface_chunk(interface))
        for enum in structure['enums']:
            chunks.append(CodeChunker.create_enum_chunk(enum))

        # 2. Methods (including those nested in classes).
        for method in structure['methods']:
            chunks.append(CodeChunker.create_method_chunk(method))

        # 3. Constructors.
        for constructor in structure['constructors']:
            chunks.append(CodeChunker.create_constructor_chunk(constructor))

        # 4. Fields.
        for field in structure['fields']:
            chunks.append(CodeChunker.create_field_chunk(field))

        # 5. Everything not covered above (package declaration, imports, ...).
        all_covered_lines = set()
        for chunk in chunks:
            start, end = chunk['line_range']
            all_covered_lines.update(range(start, end + 1))

        chunks.extend(CodeChunker.create_uncovered_chunks(
            analysis_result['source_code'],
            all_covered_lines,
            analysis_result['line_history']
        ))

        # Present chunks in source order.
        chunks.sort(key=lambda x: x['line_range'][0])

        return chunks

    @staticmethod
    def _declaration_chunk(chunk_type, info, signature):
        """Shared shape for all declaration-level chunks."""
        return {
            'type': chunk_type,
            'identifier': info['name'],
            'line_range': info['line_range'],
            'signature': signature
        }

    @staticmethod
    def _format_params(parameters):
        """Render a parameter list as ``type name, type name``."""
        return ', '.join(f"{p['type']} {p['name']}" for p in parameters)

    @staticmethod
    def create_class_chunk(cls):
        """Chunk for a class declaration."""
        return CodeChunker._declaration_chunk(
            'CLASS', cls,
            f"{' '.join(cls['modifiers'])} class {cls['name']}")

    @staticmethod
    def create_interface_chunk(interface):
        """Chunk for an interface declaration."""
        return CodeChunker._declaration_chunk(
            'INTERFACE', interface,
            f"{' '.join(interface['modifiers'])} interface {interface['name']}")

    @staticmethod
    def create_enum_chunk(enum):
        """Chunk for an enum declaration."""
        return CodeChunker._declaration_chunk(
            'ENUM', enum,
            f"{' '.join(enum['modifiers'])} enum {enum['name']}")

    @staticmethod
    def create_method_chunk(method):
        """Chunk for a method declaration."""
        params = CodeChunker._format_params(method['parameters'])
        return CodeChunker._declaration_chunk(
            'METHOD', method,
            f"{' '.join(method['modifiers'])} {method['return_type']} "
            f"{method['name']}({params})")

    @staticmethod
    def create_constructor_chunk(constructor):
        """Chunk for a constructor declaration."""
        params = CodeChunker._format_params(constructor['parameters'])
        return CodeChunker._declaration_chunk(
            'CONSTRUCTOR', constructor,
            f"{' '.join(constructor['modifiers'])} {constructor['name']}({params})")

    @staticmethod
    def create_field_chunk(field):
        """Chunk for a single field declarator."""
        return CodeChunker._declaration_chunk(
            'FIELD', field,
            f"{' '.join(field['modifiers'])} {field['field_type']} {field['name']}")

    @staticmethod
    def create_uncovered_chunks(source_code, covered_lines, line_history):
        """Chunks for lines not claimed by any declaration.

        Consecutive uncovered, non-blank lines are grouped into one chunk;
        a covered line closes the current group. Blank lines neither join
        nor close a group.
        """
        chunks = []
        open_block = []  # accumulated (line_num, text) pairs of the open group

        def _flush():
            """Emit the open group (if any) as a general chunk."""
            if open_block:
                chunk = CodeChunker.create_general_chunk(open_block, line_history)
                if chunk:
                    chunks.append(chunk)
                open_block.clear()

        for index, text in enumerate(source_code.split('\n')):
            line_num = index + 1
            if line_num not in covered_lines:
                if text.strip():  # skip blank lines entirely
                    open_block.append((line_num, text))
            else:
                _flush()

        _flush()  # close the trailing group, if any
        return chunks

    @staticmethod
    def create_general_chunk(block_lines, line_history):
        """Build a chunk for a run of uncategorized lines.

        The chunk type is inferred from the first line (package / import /
        general), and the most frequent blame author is recorded in the
        signature.
        """
        if not block_lines:
            return None

        start_line = block_lines[0][0]
        end_line = block_lines[-1][0]
        content = "\n".join(text for _, text in block_lines)

        # Classify by the first non-blank line of the block.
        first_line = block_lines[0][1].strip()
        if first_line.startswith('package '):
            chunk_type, identifier = 'PACKAGE', 'package_declaration'
        elif first_line.startswith('import '):
            chunk_type, identifier = 'IMPORT', f"import_{start_line}"
        else:
            chunk_type, identifier = 'GENERAL', f"code_block_{start_line}_{end_line}"

        # Tally blame authors across the block; the busiest one is "primary".
        authors = defaultdict(int)
        for line_num, _ in block_lines:
            if line_num in line_history:
                authors[line_history[line_num]['author']] += 1

        primary_author = (max(authors.items(), key=lambda kv: kv[1])[0]
                          if authors else "Unknown")

        return {
            'type': chunk_type,
            'identifier': identifier,
            'line_range': (start_line, end_line),
            'signature': f"{chunk_type} block @ {start_line}-{end_line} (by {primary_author})",
            'content': content
        }

class KnowledgeEntryGenerator:
    """Turns code chunks into knowledge-base entries with git metadata."""

    @staticmethod
    def generate_entries(chunks, analysis_result):
        """Build one knowledge entry per chunk.

        Each entry carries the chunk's source text plus metadata about its
        location, signature and modification history.
        """
        entries = []
        source_lines = analysis_result['source_code'].split('\n')

        for chunk in chunks:
            # Slice the chunk's source text (line_range is 1-based, inclusive).
            start, end = chunk['line_range']
            content = "\n".join(source_lines[start - 1:end])

            metadata = {
                'chunk_type': chunk['type'],
                'identifier': chunk['identifier'],
                'file_path': analysis_result['file_path'],
                'line_range': f"{start}-{end}",
                'signature': chunk.get('signature', ''),
                'package': analysis_result['code_structure']['package'],
                'file_history': analysis_result['file_history']
            }

            # Enrich with per-line modification history.
            KnowledgeEntryGenerator.add_change_metadata(metadata, chunk, analysis_result)

            entries.append({
                'id': f"{metadata['file_path']}:{start}-{end}",
                'content': content,
                'metadata': metadata
            })

        return entries

    @staticmethod
    def add_change_metadata(metadata, chunk, analysis_result):
        """Attach contributor and commit metadata for the chunk's line range.

        Mutates ``metadata`` in place, adding contributors, modification
        counts, first/last commits and per-line histories.
        """
        line_history = analysis_result['line_history']
        # Split once up front; the previous version re-split the entire
        # source for every line in the chunk (accidental O(n^2)).
        source_lines = analysis_result['source_code'].split('\n')
        start, end = chunk['line_range']

        all_commits = []
        line_histories = []

        for line in range(start, end + 1):
            if line in line_history:
                commit_info = line_history[line]
                all_commits.append(commit_info)
                line_histories.append({
                    'line': line,
                    'content': source_lines[line - 1],
                    'commit': commit_info
                })

        # Sorted for deterministic JSON output (set order is arbitrary).
        metadata['unique_contributors'] = sorted({c['author'] for c in all_commits})
        metadata['total_modifications'] = len(all_commits)

        if all_commits:
            # Timestamps come from datetime.isoformat(), so the stdlib
            # parser round-trips them without needing dateutil.
            sorted_commits = sorted(
                all_commits, key=lambda c: datetime.fromisoformat(c['timestamp']))
            metadata['first_commit'] = sorted_commits[0]
            metadata['last_commit'] = sorted_commits[-1]

        metadata['line_histories'] = line_histories

# NOTE(review): duplicate of the identical import at the top of the file;
# harmless, but can be removed.
from concurrent.futures import ThreadPoolExecutor, as_completed

def process_repository(repo_path, output_dir, target_files=None, max_workers=4):
    """Process all (or selected) Java files of a repo into knowledge entries.

    Args:
        repo_path: path at or below the Git repository root.
        output_dir: directory for per-file and combined JSON output.
        target_files: optional list of repo-relative Java file paths; when
            omitted, every ``*.java`` file under the repo root is processed.
        max_workers: thread-pool size for per-file processing (was a
            hard-coded constant; default preserves old behavior).

    Returns:
        The combined list of knowledge entries (also written to
        ``all_entries.json`` when non-empty), or [] if the repository
        cannot be opened.
    """
    try:
        analyzer = GitRepoAnalyzer(repo_path)
    except Exception as e:
        logger.error(f"Failed to initialize repository analyzer: {str(e)}")
        return []

    # Discover every Java file when no explicit target list is given.
    if not target_files:
        target_files = []
        for root, _, files in os.walk(analyzer.repo_path):
            for file in files:
                if file.endswith('.java'):
                    rel_path = os.path.relpath(os.path.join(root, file), analyzer.repo_path)
                    # Normalize to forward slashes so paths match git's view.
                    target_files.append(rel_path.replace('\\', '/'))

    os.makedirs(output_dir, exist_ok=True)

    # Fan files out to worker threads (git I/O dominates, so threads help).
    # NOTE(review): all workers share one GitPython Repo instance — confirm
    # thread-safety is acceptable under heavier loads.
    all_entries = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(process_single_file, file_path, analyzer, output_dir)
                   for file_path in target_files]

        for future in tqdm(as_completed(futures), total=len(futures), desc="Processing files"):
            try:
                all_entries.extend(future.result())
            except Exception as e:
                # One failed file must not abort the whole run.
                logger.error(f"Worker failed: {str(e)}")

    # Persist the merged entry list.
    if all_entries:
        with open(os.path.join(output_dir, 'all_entries.json'), 'w', encoding='utf-8') as f:
            json.dump(all_entries, f, indent=2, ensure_ascii=False)

    logger.info(f"Generated {len(all_entries)} knowledge entries")
    return all_entries


def process_single_file(file_path, analyzer, output_dir):
    """Analyze one Java file and write its entries to a JSON file.

    Args:
        file_path: repo-relative path of the Java file.
        analyzer: a GitRepoAnalyzer (anything exposing ``analyze_file``).
        output_dir: existing directory for the per-file JSON output.

    Returns:
        The entry list, or [] on any failure (errors are logged, not raised).
    """
    try:
        analysis = analyzer.analyze_file(file_path)
        if not analysis or not analysis['code_structure']:
            return []

        chunks = CodeChunker.chunkify(analysis)
        entries = KnowledgeEntryGenerator.generate_entries(chunks, analysis)

        # Encode the full relative path in the output name: using only the
        # basename made same-named files in different packages overwrite
        # each other's JSON.
        safe_name = file_path.replace('\\', '/').replace('/', '__')
        output_file = os.path.join(output_dir, f"{safe_name}.json")
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(entries, f, indent=2, ensure_ascii=False)

        return entries
    except Exception as e:
        logger.error(f"Error processing {file_path}: {str(e)}")
        return []

if __name__ == "__main__":
    # Configuration (hard-coded for a local run; adjust before use).
    REPO_PATH = "D:/Project/JavaCode/WeChatWriter"
    OUTPUT_DIR = "./code_knowledge"
    TARGET_FILES = ["ww-business/src/main/java/com/cencen/business/service/impl/BaiduQianfanSvcImpl.java"]
     
    # Run the full pipeline on the selected file(s).
    entries = process_repository(REPO_PATH, OUTPUT_DIR, TARGET_FILES)