# tasks.py

import os
import uuid
import json
import traceback
from typing import List, Dict, Any
from celery import current_task
from pymilvus import connections, Collection, utility, FieldSchema, CollectionSchema, DataType
from sentence_transformers import SentenceTransformer
import fitz  # PyMuPDF
from pptx import Presentation
from PIL import Image
import cv2
import numpy as np
from io import BytesIO
import csv

import config
from celery_app import celery_app

# Module-level singletons caching loaded embedding-model instances;
# lazily initialized by get_text_model() / get_multimodal_model() below.
_text_model = None
_multimodal_model = None

def get_text_model():
    """Return the process-wide text embedding model, loading it on first use."""
    global _text_model
    if _text_model is not None:
        return _text_model
    _text_model = SentenceTransformer(config.EMBEDDING_MODEL)
    return _text_model

def get_multimodal_model():
    """Return the process-wide multimodal embedding model, loading it on first use.

    NOTE: currently backed by a plain SentenceTransformer as a placeholder;
    the call must be adapted once a real multimodal embedding API is chosen.
    """
    global _multimodal_model
    if _multimodal_model is not None:
        return _multimodal_model
    _multimodal_model = SentenceTransformer(config.MULTIMODAL_EMBEDDING_MODEL)
    return _multimodal_model

def connect_milvus():
    """Open the default Milvus connection using host/port from config."""
    connections.connect(
        alias="default",
        host=config.MILVUS_HOST,
        port=config.MILVUS_PORT,
    )

def _ensure_collection(name: str, fields: List[FieldSchema], description: str) -> None:
    """Create collection `name` with an IVF_FLAT/IP index on "vector" if it is missing."""
    if utility.has_collection(name):
        return
    schema = CollectionSchema(fields, description=description)
    collection = Collection(name, schema)
    index_params = {"index_type": "IVF_FLAT", "metric_type": "IP", "params": {"nlist": 128}}
    collection.create_index(field_name="vector", index_params=index_params)


def init_collections():
    """Connect to Milvus and make sure the text and image collections exist.

    Both collections share the same layout (auto-id PK, provenance fields,
    embedding vector); the schemas only differ in the payload field and the
    vector dimension, so creation is delegated to _ensure_collection().
    """
    connect_milvus()

    # Text collection: chunk text + provenance + text embedding.
    _ensure_collection(
        config.MILVUS_COLLECTION,
        [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=1024),
            FieldSchema(name="file_type", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=config.VECTOR_DIMENSION),
        ],
        "RAG文本知识库",
    )

    # Image collection: stored image path + provenance + image embedding.
    _ensure_collection(
        config.MILVUS_COLLECTION_IMAGE,
        [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="image_path", dtype=DataType.VARCHAR, max_length=1024),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=1024),
            FieldSchema(name="file_type", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=config.IMAGE_VECTOR_DIMENSION),
        ],
        "RAG图片知识库",
    )

def extract_text_from_pdf(file_path: str) -> List[str]:
    """Extract per-page text from a PDF, skipping pages with no visible text.

    Args:
        file_path: Path to the PDF file.

    Returns:
        One string per page that contains non-whitespace text.
    """
    doc = fitz.open(file_path)
    try:
        # close the document even if get_text() raises mid-way
        return [text for page in doc if (text := page.get_text()).strip()]
    finally:
        doc.close()

def extract_text_from_pptx(file_path: str) -> List[str]:
    """Collect non-empty text from every shape on every slide of a PPTX file."""
    deck = Presentation(file_path)
    return [
        shape.text
        for slide in deck.slides
        for shape in slide.shapes
        if hasattr(shape, "text") and shape.text.strip()
    ]

def extract_text_from_docx(file_path: str) -> List[str]:
    """Return the text of every non-blank paragraph in a DOCX document."""
    from docx import Document
    document = Document(file_path)
    return [para.text for para in document.paragraphs if para.text.strip()]

def extract_text_from_csv(file_path: str) -> List[str]:
    """Read a CSV file and return one comma-joined string per non-empty row.

    Args:
        file_path: Path to a UTF-8 encoded CSV file.

    Returns:
        List of ", "-joined row strings; empty rows are skipped.
    """
    texts = []
    # newline="" is required by the csv module so that quoted fields
    # containing line breaks are parsed correctly (see csv.reader docs).
    with open(file_path, 'r', encoding='utf-8', newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            if row:
                texts.append(', '.join(row))
    return texts

def extract_images_from_pdf(file_path: str) -> List[str]:
    """Extract all embedded images from a PDF and save them as PNG files.

    Files are written to <UPLOAD_FOLDER>/images/ and named
    "<pdf-stem>_page<P>_img<I>.png".

    Args:
        file_path: Path to the PDF file.

    Returns:
        Paths of all image files written.
    """
    doc = fitz.open(file_path)
    image_paths: List[str] = []
    # loop-invariant pieces hoisted out of the extraction loop
    stem = os.path.splitext(os.path.basename(file_path))[0]
    out_dir = os.path.join(config.UPLOAD_FOLDER, "images")
    try:
        for page_num in range(len(doc)):
            page = doc[page_num]
            for img_index, img in enumerate(page.get_images()):
                xref = img[0]  # first tuple element is the image's xref id
                image_bytes = doc.extract_image(xref)["image"]

                img_path = os.path.join(out_dir, f"{stem}_page{page_num}_img{img_index}.png")
                os.makedirs(out_dir, exist_ok=True)
                with open(img_path, "wb") as img_file:
                    img_file.write(image_bytes)
                image_paths.append(img_path)
    finally:
        # close the document even when extraction or file I/O fails
        doc.close()
    return image_paths

def extract_images_from_pptx(file_path: str) -> List[str]:
    """Save every picture shape in a PPTX to the upload images folder.

    Returns the list of written image paths, named
    "<pptx-stem>_slide<S>_img<I>.png".
    """
    deck = Presentation(file_path)
    stem = os.path.splitext(os.path.basename(file_path))[0]
    saved_paths = []
    for slide_idx, slide in enumerate(deck.slides):
        for shape_idx, shape in enumerate(slide.shapes):
            if not hasattr(shape, 'image'):
                continue
            out_path = os.path.join(
                config.UPLOAD_FOLDER,
                "images",
                f"{stem}_slide{slide_idx}_img{shape_idx}.png",
            )
            os.makedirs(os.path.dirname(out_path), exist_ok=True)
            with open(out_path, "wb") as img_file:
                img_file.write(shape.image.blob)
            saved_paths.append(out_path)
    return saved_paths

def extract_frames_from_video(file_path: str, max_frames: int = 10) -> List[str]:
    """Sample up to `max_frames` frames evenly across a video and save them as JPEGs.

    Args:
        file_path: Path to the video file.
        max_frames: Maximum number of frames to write.

    Returns:
        Paths of the saved frame images in <UPLOAD_FOLDER>/frames/.
    """
    cap = cv2.VideoCapture(file_path)
    frame_paths: List[str] = []
    stem = os.path.splitext(os.path.basename(file_path))[0]
    out_dir = os.path.join(config.UPLOAD_FOLDER, "frames")
    try:
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        interval = max(1, total_frames // max_frames)

        # BUG FIX: the previous loop stopped after *reading* only max_frames
        # frames, so with interval > 1 it saved just the first frame of the
        # video.  Read the whole stream and keep every interval-th frame
        # until max_frames frames have been saved.
        frame_index = 0
        while len(frame_paths) < max_frames:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_index % interval == 0:
                frame_path = os.path.join(out_dir, f"{stem}_frame{frame_index}.jpg")
                os.makedirs(out_dir, exist_ok=True)
                cv2.imwrite(frame_path, frame)
                frame_paths.append(frame_path)
            frame_index += 1
    finally:
        # release the capture handle even if decoding or disk I/O fails
        cap.release()
    return frame_paths

@celery_app.task(bind=True)
def process_file_task(self, file_path: str, file_type: str, task_id: str):
    """Asynchronously process one file: extract text/images, embed, store in Milvus.

    Args:
        file_path: Path of the uploaded file to process.
        file_type: Lowercase file extension without the dot (e.g. "pdf", "jpg").
        task_id: External identifier for this processing job.

    Returns:
        A result dict with status, counts, and a human-readable message.
    """
    try:
        # report initial progress
        self.update_state(
            state=config.TASK_STATUS_PROCESSING,
            meta={'current': 0, 'total': 100, 'status': '开始处理文件...'}
        )

        # ensure Milvus collections exist (also opens the connection)
        init_collections()

        # lazy-loaded shared model instances
        text_model = get_text_model()
        multimodal_model = get_multimodal_model()  # placeholder, see image loop

        texts = []
        images = []

        # ---- content extraction, dispatched on file type ----
        if file_type in ['pdf', 'docx', 'pptx', 'csv', 'txt', 'md']:
            if file_type == 'pdf':
                texts = extract_text_from_pdf(file_path)
                images = extract_images_from_pdf(file_path)
            elif file_type == 'pptx':
                texts = extract_text_from_pptx(file_path)
                images = extract_images_from_pptx(file_path)
            elif file_type == 'docx':
                texts = extract_text_from_docx(file_path)
            elif file_type == 'csv':
                texts = extract_text_from_csv(file_path)
            else:  # txt, md
                with open(file_path, 'r', encoding='utf-8') as f:
                    texts = [f.read()]

        elif file_type in ['jpg', 'jpeg', 'png', 'bmp', 'gif']:
            # standalone image file
            images = [file_path]

        elif file_type in ['mp4', 'avi', 'mov', 'mkv']:
            # video: index sampled key frames as images
            images = extract_frames_from_video(file_path)

        # ---- text: chunk -> embed -> insert ----
        text_chunks = []
        if texts:
            from chatchat.server.file_rag.text_splitter.chinese_text_splitter import ChineseTextSplitter
            splitter = ChineseTextSplitter(chunk_size=config.CHUNK_SIZE, chunk_overlap=config.CHUNK_OVERLAP)

            for text in texts:
                text_chunks.extend(splitter.split_text(text))

            if text_chunks:
                text_vectors = text_model.encode(text_chunks, show_progress_bar=False, normalize_embeddings=True)

                collection = Collection(config.MILVUS_COLLECTION)
                collection.load()

                entities = [
                    {
                        "text": chunk,
                        "source": file_path,
                        "file_type": file_type,
                        "vector": vector.tolist()
                    }
                    for chunk, vector in zip(text_chunks, text_vectors)
                ]

                collection.insert(entities)
                collection.flush()

        # ---- images: embed -> insert ----
        if images:
            # BUG FIX: keep (path, vector) pairs together.  Previously a failed
            # image was skipped but zip(images, image_vectors) then paired the
            # remaining vectors with the WRONG image paths.
            embedded = []
            for img_path in images:
                try:
                    image = Image.open(img_path)
                    # TODO: call the real multimodal embedding model here;
                    # this random vector is only a placeholder.
                    vector = np.random.rand(config.IMAGE_VECTOR_DIMENSION).astype(np.float32)
                    embedded.append((img_path, vector))
                except Exception as e:
                    print(f"处理图片 {img_path} 时出错: {e}")

            if embedded:
                collection = Collection(config.MILVUS_COLLECTION_IMAGE)
                collection.load()

                entities = [
                    {
                        "image_path": img_path,
                        "source": file_path,
                        "file_type": file_type,
                        "vector": vector.tolist()
                    }
                    for img_path, vector in embedded
                ]

                collection.insert(entities)
                collection.flush()

        # ---- success: report and return ----
        result = {
            'status': config.TASK_STATUS_COMPLETED,
            'file_path': file_path,
            'file_type': file_type,
            'text_chunks_count': len(text_chunks),
            'images_count': len(images),
            'message': '文件处理完成'
        }

        self.update_state(
            state=config.TASK_STATUS_COMPLETED,
            meta=result
        )

        return result

    except Exception as e:
        error_msg = f"处理文件 {file_path} 时发生错误: {str(e)}"
        print(error_msg)
        print(traceback.format_exc())

        result = {
            'status': config.TASK_STATUS_FAILED,
            'file_path': file_path,
            'file_type': file_type,
            'error': str(e),
            'message': error_msg
        }

        self.update_state(
            state=config.TASK_STATUS_FAILED,
            meta=result
        )

        return result

    finally:
        # BUG FIX: pymilvus list_connections() yields (alias, handler) tuples,
        # so `"default" in ...` never matched; use has_connection instead.
        if connections.has_connection("default"):
            connections.disconnect("default")

@celery_app.task(bind=True)
def process_batch_files_task(self, file_paths: List[str], task_id: str):
    """Process a batch of files by dispatching one process_file_task per file.

    Args:
        file_paths: Paths of the uploaded files to process.
        task_id: External identifier; sub-tasks get ids "<task_id>_file_<i>".

    Returns:
        A result dict with per-file results, counts, and a summary message.
    """
    try:
        total_files = len(file_paths)
        completed_files = 0
        results = []

        self.update_state(
            state=config.TASK_STATUS_PROCESSING,
            meta={'current': 0, 'total': total_files, 'status': '开始批量处理...'}
        )

        for i, file_path in enumerate(file_paths):
            # derive the file type from the extension (without the dot)
            file_ext = os.path.splitext(file_path)[1].lower()
            file_type = file_ext[1:] if file_ext else 'unknown'

            # NOTE(review): blocking on a sub-task from inside a task risks
            # deadlock when the worker pool is saturated, and modern Celery
            # raises RuntimeError on result.get() inside a task unless
            # disable_sync_subtasks=False is passed (BUG FIX).  A chord/group
            # would be the proper long-term design.
            async_result = process_file_task.delay(file_path, file_type, f"{task_id}_file_{i}")
            file_result = async_result.get(disable_sync_subtasks=False)

            results.append(file_result)
            completed_files += 1

            # report incremental progress
            progress = int((completed_files / total_files) * 100)
            self.update_state(
                state=config.TASK_STATUS_PROCESSING,
                meta={
                    'current': completed_files,
                    'total': total_files,
                    'progress': progress,
                    'status': f'已处理 {completed_files}/{total_files} 个文件'
                }
            )

        final_result = {
            'status': config.TASK_STATUS_COMPLETED,
            'total_files': total_files,
            'completed_files': completed_files,
            'results': results,
            'message': '批量文件处理完成'
        }

        self.update_state(
            state=config.TASK_STATUS_COMPLETED,
            meta=final_result
        )

        return final_result

    except Exception as e:
        error_msg = f"批量处理文件时发生错误: {str(e)}"
        print(error_msg)
        print(traceback.format_exc())

        result = {
            'status': config.TASK_STATUS_FAILED,
            'error': str(e),
            'message': error_msg
        }

        self.update_state(
            state=config.TASK_STATUS_FAILED,
            meta=result
        )

        return result