import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

# Import the classes from other modules in the 'src' package
from .resume_parser import ResumeParser
from .ai_analyzer import DeepSeekAnalyzer

class BatchProcessor:
    """
    Orchestrates the resume processing workflow, including parsing,
    analyzing, and aggregating results.

    The parser must expose ``parse_resume(path) -> str`` and a
    ``supported_formats`` iterable of file extensions; the analyzer must
    expose ``analyze_resume(text, requirements) -> dict``.
    """
    def __init__(self, parser: "ResumeParser", analyzer: "DeepSeekAnalyzer"):
        self.parser = parser
        self.analyzer = analyzer

    def _base_metadata(self, file_path: str) -> dict:
        """Return the metadata fields shared by success and failure results."""
        return {
            'file_name': os.path.basename(file_path),
            'file_path': file_path,
            'processed_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        }

    def process_single_resume(self, file_path: str, job_requirements: str) -> dict:
        """
        Processes a single resume file.

        Args:
            file_path (str): The path to the resume file.
            job_requirements (str): The job requirements text.

        Returns:
            dict: File metadata merged with the analysis result. On any
            failure a zero-scored placeholder result is returned instead
            of raising, so a batch run can continue past bad files.
        """
        print(f"Processing: {os.path.basename(file_path)}")
        try:
            # 1. Parse resume text
            resume_text = self.parser.parse_resume(file_path)
            if not resume_text.strip():
                raise ValueError("Extracted resume text is empty.")

            # 2. Analyze with AI
            analysis_result = self.analyzer.analyze_resume(resume_text, job_requirements)

            # 3. Combine metadata with the analysis dictionary
            return {**self._base_metadata(file_path), **analysis_result}

        except Exception as e:
            # Deliberate best-effort: one bad file must not abort the whole
            # batch, so report the error and return a zero-scored result.
            print(f"Error processing resume {file_path}: {e}")
            return {
                **self._base_metadata(file_path),
                'skill_match': 0,
                'experience_match': 0,
                'education_match': 0,
                'overall_score': 0,
                'recommendation': '处理失败',
                'concerns': str(e)
            }

    def process_batch(self, resume_folder: str, job_requirements: str, max_workers: int = 4) -> list:
        """
        Processes a batch of resumes from a folder using multiple threads.

        Args:
            resume_folder (str): The path to the folder containing resumes.
            job_requirements (str): The job requirements text.
            max_workers (int): The number of concurrent threads to use.

        Returns:
            list: Result dictionaries sorted by 'overall_score' descending.
        """
        print(f"Starting batch processing for folder: {resume_folder}")

        # Recursively collect files whose extension the parser supports.
        # str.endswith accepts a tuple, so one call covers every format.
        supported = tuple(self.parser.supported_formats)
        resume_files = [
            os.path.join(root, file)
            for root, _, files in os.walk(resume_folder)
            for file in files
            if file.lower().endswith(supported)
        ]

        if not resume_files:
            print("No resume files found in the specified folder.")
            return []

        print(f"Found {len(resume_files)} resumes to process.")

        results = []
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tasks to the thread pool
            future_to_file = {
                executor.submit(self.process_single_resume, file, job_requirements): file
                for file in resume_files
            }

            # Consume futures in COMPLETION order rather than submission
            # order, so one slow file does not stall the progress output.
            for i, future in enumerate(as_completed(future_to_file)):
                try:
                    result = future.result()
                    results.append(result)
                    print(f"({i+1}/{len(resume_files)}) Finished processing: {result['file_name']}")
                except Exception as e:
                    # process_single_resume handles its own errors, so this
                    # only fires on truly unexpected executor-level failures.
                    file_path = future_to_file[future]
                    print(f"An exception occurred while processing {file_path}: {e}")

        # Rank candidates best-first by overall score.
        results.sort(key=lambda x: x.get('overall_score', 0), reverse=True)

        return results