#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Elasticsearch检索工具
扩展qwen_agent框架，支持Elasticsearch文档检索
"""

import hashlib
import json
import logging
import os
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

from qwen_agent.tools.base import BaseTool
from qwen_agent.tools.simple_doc_parser import SimpleDocParser

logger = logging.getLogger(__name__)


class ElasticsearchRetrieval(BaseTool):
    """Elasticsearch-backed document retrieval tool.

    Drop-in replacement for qwen_agent's default ``retrieval`` tool:
    parses local files, splits their text into overlapping chunks,
    indexes the chunks into an Elasticsearch index, and serves
    full-text queries against that index.
    """

    name = 'retrieval'
    description = '从Elasticsearch索引中检索与查询相关的文档片段'
    parameters = [{
        'name': 'query',
        'type': 'string',
        'description': '搜索查询字符串',
        'required': True
    }]

    def __init__(self, cfg: Optional[Dict] = None):
        """Initialize the tool and eagerly connect to Elasticsearch.

        Args:
            cfg: Optional configuration dict. Recognized keys:
                es_host, es_port, es_user, es_password, index_name,
                verify_certs, chunk_size, chunk_overlap, max_results.
        """
        super().__init__(cfg)
        self.cfg = cfg or {}

        # Connection settings.
        # NOTE(security): a credential should never ship hard-coded. The
        # ES_PASSWORD environment variable now takes precedence over the
        # legacy default, which is kept only for backward compatibility.
        self.es_host = self.cfg.get('es_host', 'https://localhost')
        self.es_port = self.cfg.get('es_port', 9200)
        self.es_user = self.cfg.get('es_user', 'elastic')
        self.es_password = self.cfg.get(
            'es_password', os.getenv('ES_PASSWORD', 'ncUg27UP5aMSzSElVXAX'))
        self.index_name = self.cfg.get('index_name', 'qwen_agent_docs')
        self.verify_certs = self.cfg.get('verify_certs', False)

        # Document chunking / search settings (characters, not tokens).
        self.chunk_size = self.cfg.get('chunk_size', 500)
        self.chunk_overlap = self.cfg.get('chunk_overlap', 50)
        self.max_results = self.cfg.get('max_results', 5)

        # Components. es_client stays None until a connection succeeds,
        # so every method can guard with `if not self.es_client`.
        self.es_client: Optional[Elasticsearch] = None
        self.doc_parser = SimpleDocParser(cfg)
        # Kept for backward compatibility with external users; not used
        # internally (dedup is done against the index via file hashes).
        self.indexed_files = set()

        # Connect eagerly; failures are logged, not raised, so the tool
        # can still be constructed in a degraded (disconnected) state.
        self._connect_elasticsearch()
        self._create_index_if_not_exists()

    def _connect_elasticsearch(self) -> bool:
        """Connect to Elasticsearch.

        Returns:
            True if the cluster answered a ping, False otherwise. On any
            failure ``self.es_client`` is reset to None so the
            ``if not self.es_client`` guards elsewhere stay truthful.
        """
        try:
            self.es_client = Elasticsearch(
                f"{self.es_host}:{self.es_port}",
                basic_auth=(self.es_user, self.es_password),
                verify_certs=self.verify_certs,
                request_timeout=60,
                max_retries=3,
                retry_on_timeout=True
            )

            if self.es_client.ping():
                logger.info("✅ 成功连接到Elasticsearch")
                return True

            # A client object exists but the cluster is unreachable;
            # drop it so the rest of the tool treats us as disconnected.
            self.es_client = None
            logger.error("❌ 无法连接到Elasticsearch")
            return False

        except Exception as e:
            self.es_client = None
            logger.error(f"❌ ES连接失败: {str(e)}")
            return False

    def _create_index_if_not_exists(self):
        """Create the target index with its mapping, if it does not exist."""
        if not self.es_client:
            return

        try:
            if not self.es_client.indices.exists(index=self.index_name):
                mapping = {
                    "mappings": {
                        "properties": {
                            "file_path": {"type": "keyword"},
                            "file_name": {"type": "keyword"},
                            "chunk_id": {"type": "keyword"},
                            "content": {
                                "type": "text",
                                "analyzer": "standard"
                            },
                            "metadata": {"type": "object"},
                            "created_time": {"type": "date"},
                            "file_hash": {"type": "keyword"}
                        }
                    },
                    # Single-node friendly defaults: one shard, no replicas.
                    "settings": {
                        "number_of_shards": 1,
                        "number_of_replicas": 0
                    }
                }

                self.es_client.indices.create(index=self.index_name, body=mapping)
                logger.info(f"✅ 创建索引: {self.index_name}")
            else:
                logger.info(f"📋 索引已存在: {self.index_name}")

        except Exception as e:
            logger.error(f"❌ 创建索引失败: {str(e)}")

    def _get_file_hash(self, file_path: str) -> str:
        """Return the MD5 hex digest of the file, or "" if unreadable.

        MD5 is used only for change detection, not for security.
        """
        try:
            with open(file_path, 'rb') as f:
                content = f.read()
                return hashlib.md5(content).hexdigest()
        except Exception:
            return ""

    def _is_file_indexed(self, file_path: str) -> bool:
        """Check whether this exact file version (path + hash) is indexed."""
        if not self.es_client:
            return False

        try:
            file_hash = self._get_file_hash(file_path)
            if not file_hash:
                return False

            # Match on both path and content hash so a modified file is
            # re-indexed even though its path is unchanged.
            query = {
                "query": {
                    "bool": {
                        "must": [
                            {"term": {"file_path": file_path}},
                            {"term": {"file_hash": file_hash}}
                        ]
                    }
                }
            }

            response = self.es_client.count(index=self.index_name, body=query)
            return response['count'] > 0

        except Exception as e:
            logger.error(f"检查文件索引状态失败: {str(e)}")
            return False

    def _chunk_text(self, text: str) -> List[str]:
        """Split text into overlapping chunks of roughly ``chunk_size`` chars.

        Each cut point is pushed forward (up to 100 chars) to the next
        sentence terminator (。！？ or newline) when one is nearby, and
        consecutive chunks overlap by ``chunk_overlap`` characters.
        """
        if len(text) <= self.chunk_size:
            return [text]

        chunks = []
        start = 0

        while start < len(text):
            end = start + self.chunk_size

            # Prefer to cut at a sentence boundary shortly after `end`.
            if end < len(text):
                for i in range(end, min(end + 100, len(text))):
                    if text[i] in '。！？\n':
                        end = i + 1
                        break

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # Advance with overlap, but guarantee forward progress: a
            # misconfigured chunk_overlap >= chunk_size would otherwise
            # make `start` stall and loop forever.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def index_files(self, files: List[str]) -> bool:
        """Parse, chunk and bulk-index a list of files.

        Files whose (path, content-hash) pair is already indexed are
        skipped. Returns True when indexing completed (including the
        "nothing new to index" case), False on a hard failure.
        """
        if not self.es_client:
            logger.error("ES客户端未连接")
            return False

        logger.info(f"📚 开始索引 {len(files)} 个文件")

        actions = []
        indexed_count = 0

        for file_path in files:
            try:
                # Skip files already indexed in their current version.
                if self._is_file_indexed(file_path):
                    logger.info(f"⏭️ 跳过已索引文件: {os.path.basename(file_path)}")
                    continue

                logger.info(f"📄 处理文件: {os.path.basename(file_path)}")

                # Parse the document into page objects.
                # NOTE(review): assumes doc_parser.load() yields objects
                # with .page_content / .metadata — confirm against the
                # SimpleDocParser API in use.
                documents = self.doc_parser.load(file_path)
                if not documents:
                    logger.warning(f"⚠️ 无法解析文件: {file_path}")
                    continue

                # Concatenate page text and merge page-level metadata.
                full_text = ""
                metadata = {}
                for doc in documents:
                    full_text += doc.page_content + "\n"
                    metadata.update(doc.metadata)

                if not full_text.strip():
                    logger.warning(f"⚠️ 文件内容为空: {file_path}")
                    continue

                chunks = self._chunk_text(full_text)
                file_hash = self._get_file_hash(file_path)
                # Real timestamp: the literal string "now" is not a valid
                # value for an ES `date` field at index time and would
                # make every bulk insert fail with a mapper parse error.
                created_time = datetime.now(timezone.utc).isoformat()

                # One bulk action per chunk; _id derives from the content
                # hash so re-indexing the same version is idempotent.
                for i, chunk in enumerate(chunks):
                    chunk_id = f"{os.path.basename(file_path)}_{i}"

                    action = {
                        "_index": self.index_name,
                        "_id": f"{file_hash}_{i}",
                        "_source": {
                            "file_path": file_path,
                            "file_name": os.path.basename(file_path),
                            "chunk_id": chunk_id,
                            "content": chunk,
                            "metadata": metadata,
                            "created_time": created_time,
                            "file_hash": file_hash
                        }
                    }
                    actions.append(action)

                indexed_count += 1
                logger.info(f"✅ 文件分块完成: {len(chunks)} 个块")

            except Exception as e:
                logger.error(f"❌ 处理文件失败 {file_path}: {str(e)}")
                continue

        # Bulk-index everything collected above in one helper call.
        if actions:
            try:
                success_count, failed_items = bulk(
                    self.es_client,
                    actions,
                    chunk_size=100,
                    request_timeout=60
                )

                # Refresh so the new chunks are immediately searchable.
                self.es_client.indices.refresh(index=self.index_name)
                logger.info(f"✅ 批量索引完成: {success_count} 个文档块")

                if failed_items:
                    logger.warning(f"⚠️ {len(failed_items)} 个文档块索引失败")

                return True

            except Exception as e:
                logger.error(f"❌ 批量索引失败: {str(e)}")
                return False

        logger.info("📊 没有新文件需要索引")
        return True

    def search(self, query: str, k: Optional[int] = None) -> List[Dict[str, Any]]:
        """Full-text search over the indexed chunks.

        Args:
            query: Search string.
            k: Max number of hits; defaults to ``self.max_results``.

        Returns:
            A list of dicts shaped like langchain Documents
            (``page_content`` + ``metadata``); empty on error.
        """
        if not self.es_client:
            logger.error("ES客户端未连接")
            return []

        if k is None:
            k = self.max_results

        try:
            # Exact-phrase matches are boosted above plain term matches.
            search_body = {
                "query": {
                    "bool": {
                        "should": [
                            {
                                "match": {
                                    "content": {
                                        "query": query,
                                        "boost": 2.0
                                    }
                                }
                            },
                            {
                                "match_phrase": {
                                    "content": {
                                        "query": query,
                                        "boost": 3.0
                                    }
                                }
                            }
                        ],
                        "minimum_should_match": 1
                    }
                },
                "highlight": {
                    "fields": {
                        "content": {
                            "fragment_size": 150,
                            "number_of_fragments": 2
                        }
                    }
                },
                "size": k,
                "_source": ["file_name", "content", "metadata", "chunk_id"]
            }

            response = self.es_client.search(
                index=self.index_name,
                body=search_body
            )

            results = []
            for hit in response['hits']['hits']:
                source = hit['_source']

                # Shape each hit like a langchain Document.
                result = {
                    'page_content': source['content'],
                    'metadata': {
                        'source': source['file_name'],
                        'chunk_id': source['chunk_id'],
                        'score': hit['_score'],
                        **source.get('metadata', {})
                    }
                }

                # Attach highlighted fragments when ES produced any.
                if 'highlight' in hit:
                    result['metadata']['highlights'] = hit['highlight'].get('content', [])

                results.append(result)

            logger.info(f"🔍 搜索完成: 找到 {len(results)} 个相关文档")
            return results

        except Exception as e:
            logger.error(f"❌ 搜索失败: {str(e)}")
            return []

    def call(self, params: str, **kwargs) -> str:
        """Tool entry point used by the agent framework.

        Args:
            params: JSON string (or already-decoded dict) containing a
                ``query`` key.

        Returns:
            JSON array of ``{content, source, score, chunk_id}`` results;
            ``[]`` on empty query or any error.
        """
        try:
            if isinstance(params, str):
                params = json.loads(params)

            query = params.get('query', '')
            if not query:
                return json.dumps([], ensure_ascii=False)

            results = self.search(query)

            # Flatten the Document-like results into the tool's wire format.
            formatted_results = []
            for result in results:
                formatted_results.append({
                    'content': result['page_content'],
                    'source': result['metadata']['source'],
                    'score': result['metadata']['score'],
                    'chunk_id': result['metadata']['chunk_id']
                })

            return json.dumps(formatted_results, ensure_ascii=False)

        except Exception as e:
            logger.error(f"❌ 工具调用失败: {str(e)}")
            return json.dumps([], ensure_ascii=False)


# 兼容性函数，用于替换默认的retrieval工具
def create_elasticsearch_retrieval(cfg: Optional[Dict] = None) -> ElasticsearchRetrieval:
    """Factory: build and return an ElasticsearchRetrieval configured by *cfg*."""
    tool = ElasticsearchRetrieval(cfg)
    return tool