import json
from typing import List, Dict
import pandas as pd
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
from app.models.knowledge_base import Document
from app.services.embedding_service import EmbeddingService
from app.utils.text_splitter import split_text
from app.utils.file_processors import FileProcessor
from app import db, logger
import os
from .document_splitter import SemanticSplitter, QASplitter
from app.utils.task_manager import task_manager
from datetime import datetime
import logging
from flask import current_app
from app import create_app

# Module-level logger; note this rebinds (shadows) the `logger` imported
# from `app` above.
logger = logging.getLogger(__name__)

class DocumentService:
    """Ingests documents into a knowledge base.

    Reads plain-text or structured (CSV/Excel) files, splits them into
    chunks, generates embeddings via EmbeddingService, and persists
    Document rows through the SQLAlchemy session.
    """

    # Concurrent-processing configuration
    MAX_WORKERS = 4  # maximum number of worker threads
    BATCH_SIZE = 1000  # records persisted per database batch
    
    @staticmethod
    def process_document(filepath: str, kb_id: int, method: str = 'char') -> List[Document]:
        """Process a document file and return the persisted Document chunks.

        Dispatches CSV/Excel inputs to the structured-file pipeline and
        everything else to the plain-text pipeline.
        """
        extension = os.path.splitext(filepath)[1].lower()

        handler = (
            DocumentService._process_structured_file
            if extension in ('.csv', '.xlsx', '.xls')
            else DocumentService._process_text_file
        )
        return handler(filepath, kb_id, method)

    @staticmethod
    def _process_structured_file(filepath: str, kb_id: int, method: str) -> List[Document]:
        """Turn a structured file (CSV/Excel) into persisted Document records.

        Loads the file into a DataFrame, then either keeps each row intact
        ('raw' method) or splits the 'content' column with the chosen method.

        Raises:
            Exception: wrapping any read/processing/persistence failure.
        """
        try:
            frame = DocumentService._read_structured_file(filepath)
            if frame.empty:
                raise Exception('File is empty')

            if method == 'raw':
                # One document per row, original columns preserved.
                return DocumentService._process_raw_format(frame, filepath, kb_id)
            # Otherwise split the 'content' column into chunks.
            return DocumentService._process_content_column(frame, filepath, kb_id, method)

        except Exception as e:
            raise Exception(f'Error processing structured file: {str(e)}')

    @staticmethod
    def _read_structured_file(filepath: str) -> pd.DataFrame:
        """Read a structured file (CSV or Excel) into a DataFrame.

        CSV files are tried against several encodings; Excel files are read
        through openpyxl with every cell kept as a string. Embedded newlines
        are collapsed to spaces and NA values replaced with empty strings.

        Raises:
            Exception: if the file cannot be decoded, cannot be read, or is
                empty after loading.
        """
        file_ext = os.path.splitext(filepath)[1].lower()

        if file_ext == '.csv':
            # Try encodings in order. 'utf-8-sig' MUST come before 'utf-8':
            # a BOM-prefixed file decodes "cleanly" as plain utf-8 but leaves
            # a stray '\ufeff' glued to the first column name, while
            # 'utf-8-sig' reads BOM-less UTF-8 files identically to 'utf-8'.
            encodings = ['utf-8-sig', 'utf-8', 'gbk', 'gb2312', 'gb18030', 'cp936']
            df = None
            last_error = None

            for encoding in encodings:
                try:
                    logger.info(f"Trying to read CSV with {encoding} encoding")
                    df = pd.read_csv(
                        filepath,
                        encoding=encoding,
                        on_bad_lines='skip',  # skip malformed rows
                        low_memory=False  # avoid dtype-inference warnings
                    )
                    logger.info(f"Successfully read CSV file with {encoding} encoding")
                    break
                except UnicodeDecodeError as e:
                    last_error = e
                    logger.warning(f"Failed to read with {encoding} encoding: {str(e)}")
                    continue
                except Exception as e:
                    last_error = e
                    logger.error(f"Error reading CSV with {encoding} encoding: {str(e)}")
                    continue

            if df is None:
                raise Exception(f"Unable to read CSV file with any supported encoding. Last error: {str(last_error)}")
        else:
            try:
                # Read Excel through the openpyxl engine; keep raw strings and
                # defer NA handling to the cleanup step below.
                df = pd.read_excel(
                    filepath,
                    engine='openpyxl',
                    na_filter=False,  # do not auto-convert NA markers
                    dtype=str  # read every column as text
                )
                logger.info("Successfully read Excel file")
            except Exception as e:
                raise Exception(f"Error reading Excel file: {str(e)}")

        # Normalize cell text: collapse embedded newlines, blank out NAs.
        df = df.replace({'\n': ' ', '\r': ' '}, regex=True)
        df = df.fillna('')

        if df.empty:
            raise Exception('File is empty')

        logger.info(f"Successfully loaded file with {len(df)} rows and {len(df.columns)} columns")

        return df

    @staticmethod
    def _process_raw_format(df: pd.DataFrame, filepath: str, kb_id: int) -> List[Document]:
        """Build one Document per DataFrame row, in parallel, and persist them.

        Rows are grouped into ~BATCH_SIZE batches which worker threads turn
        into Document objects; the results are then bulk-saved in
        BATCH_SIZE-sized chunks.

        Raises:
            Exception: wrapping any worker or database failure.
        """
        documents = []
        row_count = len(df)
        column_names = list(df.columns)

        # Partition the frame so each worker handles roughly one batch.
        batch_count = max(1, row_count // DocumentService.BATCH_SIZE)
        batches = np.array_split(df, batch_count)

        with ThreadPoolExecutor(max_workers=DocumentService.MAX_WORKERS) as executor:
            futures = {
                executor.submit(
                    DocumentService._process_raw_batch,
                    batch,
                    filepath,
                    kb_id,
                    column_names
                ): idx
                for idx, batch in enumerate(batches)
            }

            for future in as_completed(futures):
                try:
                    documents.extend(future.result())
                except Exception as e:
                    raise Exception(f'Error processing batch: {str(e)}')

        # Persist in fixed-size chunks, rolling back on any failure.
        try:
            for start in range(0, len(documents), DocumentService.BATCH_SIZE):
                chunk = documents[start:start + DocumentService.BATCH_SIZE]
                db.session.bulk_save_objects(chunk)
                db.session.commit()
        except Exception as e:
            db.session.rollback()
            raise Exception(f'Error saving to database: {str(e)}')

        return documents

    @staticmethod
    def _process_raw_batch(batch: pd.DataFrame, filepath: str, kb_id: int, columns: List[str]) -> List[Document]:
        """Convert one batch of rows into embedded Documents (no DB writes)."""
        title = os.path.basename(filepath)
        docs = []
        for _, row in batch.iterrows():
            # Serialize the full row as JSON and embed that text.
            row_json = row.to_json(force_ascii=False)
            docs.append(Document(
                knowledge_base_id=kb_id,
                title=title,
                content=row_json,
                embedding=EmbeddingService.generate_embedding(row_json),
                metadata={
                    'original_format': 'structured',
                    'columns': columns
                }
            ))
        return docs

    @staticmethod
    def _process_content_column(df: pd.DataFrame, filepath: str, kb_id: int, method: str) -> List[Document]:
        """Split every value of the 'content' column and persist the chunks.

        Raises:
            Exception: if the 'content' column is missing, a worker fails,
                or the database save fails.
        """
        if 'content' not in df.columns:
            raise Exception('CSV/Excel file must contain a "content" column')

        texts = df['content'].dropna().astype(str).tolist()
        documents = []

        with ThreadPoolExecutor(max_workers=DocumentService.MAX_WORKERS) as executor:
            # One task per content cell; each returns a list of chunk Documents.
            futures = {
                executor.submit(
                    DocumentService._process_content,
                    text,
                    filepath,
                    kb_id,
                    method
                ): idx
                for idx, text in enumerate(texts)
            }

            for future in as_completed(futures):
                try:
                    documents.extend(future.result())
                except Exception as e:
                    raise Exception(f'Error processing content: {str(e)}')

        # Persist in fixed-size chunks, rolling back on any failure.
        try:
            for start in range(0, len(documents), DocumentService.BATCH_SIZE):
                chunk = documents[start:start + DocumentService.BATCH_SIZE]
                db.session.bulk_save_objects(chunk)
                db.session.commit()
        except Exception as e:
            db.session.rollback()
            raise Exception(f'Error saving to database: {str(e)}')

        return documents

    @staticmethod
    def _process_content(content: str, filepath: str, kb_id: int, method: str) -> List[Document]:
        """Split one content string and wrap each chunk in an embedded Document."""
        title = os.path.basename(filepath)
        return [
            Document(
                knowledge_base_id=kb_id,
                title=title,
                content=chunk,
                embedding=EmbeddingService.generate_embedding(chunk),
                metadata={'original_format': 'text'}
            )
            for chunk in DocumentService.split_document(content, method)
        ]

    @staticmethod
    def _process_text_file(filepath: str, kb_id: int, method: str) -> List[Document]:
        """Process a plain-text/document file into persisted Document chunks.

        Reads the file via its FileProcessor, splits the content with the
        requested method, embeds each chunk, and bulk-saves the documents.

        Raises:
            Exception: wrapping any database failure during the save.
        """
        processor = FileProcessor.get_processor(filepath)
        content = processor.read()
        chunks = DocumentService.split_document(content, method)
        documents = DocumentService._process_text_chunks(chunks, filepath, kb_id)

        # Roll back on failure so the session is left clean — consistent with
        # the structured-file save paths, which previously this path lacked.
        try:
            db.session.bulk_save_objects(documents)
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            raise Exception(f'Error saving to database: {str(e)}')

        return documents

    @staticmethod
    def _process_text_chunks(chunks: List[str], filepath: str, kb_id: int) -> List[Document]:
        """Wrap each text chunk in an embedded Document (no DB writes)."""
        title = os.path.basename(filepath)
        return [
            Document(
                knowledge_base_id=kb_id,
                title=title,
                content=chunk,
                embedding=EmbeddingService.generate_embedding(chunk),
                metadata={'original_format': 'text'}
            )
            for chunk in chunks
        ]

    @staticmethod
    def split_document(content: str, method: str = 'char') -> List[str]:
        """根据指定方法分割文档"""
        if method == 'semantic':
            return SemanticSplitter.split(content)
        elif method == 'qa':
            qa_pairs = QASplitter.split(content)
            chunks = []
            for qa in qa_pairs:
                chunk = f"Q: {qa['question']}\nA: {qa['answer']}"
                chunks.append(chunk)
            return chunks
        elif method == 'raw':
            return [content]
        else:
            return split_text(content, method=method) 

    @staticmethod
    def process_document_async(file_path, knowledge_base_id, method, task_id):
        """Process a document in the background, reporting progress per task.

        Runs inside a freshly created Flask app context (so it works from a
        worker thread/process), embeds the file's rows or chunks into
        Document records, updates task_manager along the way, and always
        removes the temporary file on exit.

        Args:
            file_path: Path to the uploaded temporary file (deleted in finally).
            knowledge_base_id: Target knowledge base id for the new documents.
            method: Split method ('raw', 'semantic', 'qa', or a generic one).
            task_id: task_manager id used for status/progress updates.

        Raises:
            Re-raises any processing error after marking the task as failed.
        """
        # Build an app instance because this code runs outside any request context.
        app = create_app(os.getenv('FLASK_CONFIG', 'development'))
        with app.app_context():
            try:
                logger.info(f"Starting async processing for task {task_id}")
                logger.info(f"Parameters: file_path={file_path}, kb_id={knowledge_base_id}, method={method}")

                # Mark the task as running
                task_manager.update_task(
                    task_id,
                    status='running',
                    started_at=datetime.utcnow()
                )

                # Choose the processing path based on the file extension
                file_ext = os.path.splitext(file_path)[1].lower()

                if file_ext in ['.csv', '.xlsx', '.xls']:
                    # Structured file (CSV/Excel)
                    logger.info(f"Processing structured file: {file_path}")
                    try:
                        # Load the whole file into a DataFrame
                        df = DocumentService._read_structured_file(file_path)
                        total_chunks = len(df)
                        logger.info(f"Read structured file with {total_chunks} rows")

                        # Report that the read phase is done (fixed 30%)
                        task_manager.update_task(
                            task_id,
                            progress=30,
                            message=f"文件读取完成，共 {total_chunks} 行"
                        )

                        # 'raw': keep one document per row
                        if method == 'raw':
                            # Process row by row in batches of 100
                            processed_chunks = 0
                            batch_size = 100

                            for i in range(0, total_chunks, batch_size):
                                batch_df = df.iloc[i:i+batch_size]
                                docs = []

                                for _, row in batch_df.iterrows():
                                    try:
                                        # NOTE(review): despite method == 'raw', each row is
                                        # reshaped into a Q/A pair from columns 1 and 2.
                                        # row[1]/row[2] go through Series __getitem__ and
                                        # assume the file's 2nd/3rd columns hold question and
                                        # answer — confirm against the expected file layout.
                                        qa_content = {
                                            "question": row[1],
                                            "answer": row[2]
                                        }
                                        qa_text = json.dumps(qa_content, ensure_ascii=False)

                                        # Generate the vector embedding.
                                        # NOTE(review): instantiates EmbeddingService() here,
                                        # while the sync paths call it on the class — verify
                                        # both forms are equivalent.
                                        embedding = EmbeddingService().generate_embedding(qa_text)

                                        doc = Document(
                                            knowledge_base_id=knowledge_base_id,
                                            title=os.path.basename(file_path),
                                            content=qa_text,
                                            embedding=embedding,
                                            metadata={
                                                'original_format': 'structured',
                                                'columns': list(df.columns),
                                                'qa_format': True
                                            }
                                        )
                                        docs.append(doc)
                                        processed_chunks += 1
                                    except Exception as e:
                                        # Skip bad rows but keep processing the batch
                                        logger.error(f"Error processing row: {str(e)}")
                                        continue

                                # Bulk-save this batch
                                if docs:
                                    db.session.bulk_save_objects(docs)
                                    db.session.commit()

                                # Progress: read phase = 30%, row processing spans 30-90%
                                progress = 30 + int(60 * processed_chunks / total_chunks)
                                task_manager.update_task(
                                    task_id,
                                    progress=progress,
                                    message=f"已处理 {processed_chunks}/{total_chunks} 行"
                                )
                        else:
                            # Split the 'content' column with the chosen method
                            if 'content' not in df.columns:
                                raise ValueError("CSV/Excel file must contain a 'content' column")

                            contents = df['content'].dropna().astype(str).tolist()
                            total_chunks = len(contents)
                            processed_chunks = 0

                            for content in contents:
                                try:
                                    # Split this cell's text into chunks
                                    chunks = DocumentService.split_document(content, method)

                                    # Embed and stage each resulting chunk
                                    for chunk in chunks:
                                        try:
                                            embedding = EmbeddingService().generate_embedding(chunk)
                                            doc = Document(
                                                knowledge_base_id=knowledge_base_id,
                                                title=os.path.basename(file_path),
                                                content=chunk,
                                                embedding=embedding,
                                                metadata={'original_format': 'text'}
                                            )
                                            db.session.add(doc)
                                            processed_chunks += 1
                                        except Exception as e:
                                            logger.error(f"Error processing chunk: {str(e)}")
                                            continue

                                    # Commit once every 100 processed documents
                                    if processed_chunks % 100 == 0:
                                        db.session.commit()

                                    # Progress: read phase = 30%, processing spans 30-90%
                                    progress = 30 + int(60 * processed_chunks / total_chunks)
                                    task_manager.update_task(
                                        task_id,
                                        progress=progress,
                                        message=f"已处理 {processed_chunks}/{total_chunks} 个片段"
                                    )
                                except Exception as e:
                                    logger.error(f"Error processing content: {str(e)}")
                                    continue

                            # Final commit for any remaining uncommitted documents
                            db.session.commit()

                    except Exception as e:
                        logger.error(f"Error processing structured file: {str(e)}")
                        raise

                else:
                    # Plain text / document file
                    logger.info(f"Processing text file: {file_path}")
                    processor = FileProcessor.get_processor(file_path)
                    content = processor.read()

                    # Split the text into chunks
                    chunks = DocumentService.split_document(content, method)
                    total_chunks = len(chunks)
                    logger.info(f"Split text into {total_chunks} chunks")

                    # Report that the split phase is done (fixed 30%)
                    task_manager.update_task(
                        task_id,
                        progress=30,
                        message=f"文档分割完成，共 {total_chunks} 个片段"
                    )

                    # Embed and stage each chunk
                    processed_chunks = 0
                    for chunk in chunks:
                        try:
                            embedding = EmbeddingService().generate_embedding(chunk)
                            doc = Document(
                                knowledge_base_id=knowledge_base_id,
                                title=os.path.basename(file_path),
                                content=chunk,
                                embedding=embedding,
                                metadata={'original_format': 'text'}
                            )
                            db.session.add(doc)
                            processed_chunks += 1

                            # Commit once every 100 processed documents
                            if processed_chunks % 100 == 0:
                                db.session.commit()

                            # Progress: split phase = 30%, processing spans 30-90%
                            progress = 30 + int(60 * processed_chunks / total_chunks)
                            task_manager.update_task(
                                task_id,
                                progress=progress,
                                message=f"已处理 {processed_chunks}/{total_chunks} 个片段"
                            )
                        except Exception as e:
                            logger.error(f"Error processing chunk: {str(e)}")
                            continue

                    # Final commit for any remaining uncommitted documents
                    db.session.commit()

                # Mark the task as completed
                task_manager.update_task(
                    task_id,
                    status='completed',
                    progress=100,
                    message=f"处理完成，共导入 {processed_chunks} 个文档片段",
                    completed_at=datetime.utcnow()
                )
                logger.info(f"Task {task_id} completed successfully")

            except Exception as e:
                logger.error(f"Error processing document: {str(e)}", exc_info=True)
                task_manager.update_task(
                    task_id,
                    status='failed',
                    error=str(e)
                )
                raise
            finally:
                # Remove the temporary upload regardless of outcome
                if os.path.exists(file_path):
                    os.remove(file_path)
                    logger.info(f"Temporary file {file_path} removed")