import os
import json
import pickle
from typing import List, Union
from pathlib import Path
from tqdm import tqdm
import hashlib

from dotenv import load_dotenv
from openai import OpenAI
from rank_bm25 import BM25Okapi
import faiss
import numpy as np
from tenacity import retry, wait_fixed, stop_after_attempt
import dashscope
from dashscope import TextEmbedding

# BM25Ingestor: utility for building and persisting BM25 indices
class BM25Ingestor:
    """Builds BM25 indices from report text chunks and pickles them to disk."""

    def __init__(self):
        pass

    def create_bm25_index(self, chunks: List[str]) -> BM25Okapi:
        """Create a BM25 index from a list of text chunks (whitespace-tokenized)."""
        return BM25Okapi([chunk.split() for chunk in chunks])

    def process_reports(self, all_reports_dir: Path, output_dir: Path):
        """Build and save a BM25 index for every JSON report in a directory.

        Args:
            all_reports_dir (Path): directory containing the JSON reports
            output_dir (Path): directory where the pickled indices are written
        """
        output_dir.mkdir(parents=True, exist_ok=True)
        report_paths = list(all_reports_dir.glob("*.json"))

        for path in tqdm(report_paths, desc="Processing reports for BM25"):
            # Load one report.
            report = json.loads(path.read_text(encoding='utf-8'))

            # Extract the text chunks and build the index.
            chunks = [c['text'] for c in report['content']['chunks']]
            index = self.create_bm25_index(chunks)

            # Persist under the report's sha1 to keep file names safe.
            sha1_name = report["metainfo"]["sha1"]
            with open(output_dir / f"{sha1_name}.pkl", 'wb') as fh:
                pickle.dump(index, fh)

        print(f"Processed {len(report_paths)} reports")

# VectorDBIngestor: utility for building and persisting FAISS vector stores
class VectorDBIngestor:
    """Builds FAISS vector indices from report text chunks and saves them to disk.

    The embedding backend is selected from environment variables:
    ``EMBEDDING_PROVIDER`` ("ollama" | "openai" | "dashscope", default
    "dashscope"), with a forced override to "ollama" when ``LLM_PROVIDER``
    is "ollama" or ``OLLAMA_EMBEDDING_MODEL`` is set.
    """

    def __init__(self):
        # Load the project-root .env explicitly so configuration is found even
        # when the process is launched from a subdirectory (e.g. data/stock_data).
        try:
            from pathlib import Path as _Path
            _root = _Path(__file__).parent.parent
            load_dotenv(dotenv_path=_root / ".env")
        except Exception:
            load_dotenv()
        # Read and normalize the embedding provider name.
        self.embedding_provider = os.getenv("EMBEDDING_PROVIDER", "dashscope").lower()
        # Force the ollama path when the local LLM uses ollama or a local
        # embedding model is explicitly configured.
        _llm_provider = os.getenv("LLM_PROVIDER", "").lower()
        _ollama_emb_model_env = os.getenv("OLLAMA_EMBEDDING_MODEL", "")
        if _llm_provider == "ollama" or _ollama_emb_model_env:
            self.embedding_provider = "ollama"
        self.ollama_emb_model = _ollama_emb_model_env if _ollama_emb_model_env else "bge-m3"
        self.llm = None
        if self.embedding_provider == "ollama":
            # Ollama exposes an OpenAI-compatible API; reuse the OpenAI client.
            base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434/v1")
            self.llm = OpenAI(
                base_url=base_url,
                api_key=os.getenv("OLLAMA_API_KEY", "ollama"),
                timeout=None,
                max_retries=2
            )
        elif self.embedding_provider == "openai":
            self.llm = OpenAI(
                api_key=os.getenv("OPENAI_API_KEY"),
                timeout=None,
                max_retries=2
            )
        else:
            # DashScope path: configure the module-level API key here; the
            # actual calls happen lazily in _get_embeddings.
            dashscope.api_key = os.getenv("DASHSCOPE_API_KEY")
        # Debug print to confirm the effective embedding configuration.
        print(f"[调试] Ingestor.init provider={self.embedding_provider}, model={self.ollama_emb_model}")

    @retry(wait=wait_fixed(10), stop=stop_after_attempt(2))
    def _get_embeddings(self, text: Union[str, List[str]], model: str = None) -> List[List[float]]:
        """Embed a string or list of strings, dispatching on the configured provider.

        Retries (via tenacity) twice with a 10s wait on any exception.

        NOTE: whitespace-only entries are silently dropped, so the returned
        vector count can be smaller than the input count — callers that need
        a 1:1 chunk-to-vector mapping must pre-filter (see _process_report).

        Raises:
            ValueError: on empty/non-string input.
            RuntimeError: when DashScope returns an empty or malformed result.
        """
        if isinstance(text, str) and not text.strip():
            raise ValueError("Input text cannot be an empty string.")

        # Normalize to a list of non-empty strings.
        text_chunks = text if isinstance(text, list) else [text]
        if not all(isinstance(x, str) for x in text_chunks):
            raise ValueError("所有待嵌入文本必须为字符串类型！实际类型: {}".format([type(x) for x in text_chunks]))
        text_chunks = [x for x in text_chunks if x.strip()]
        if not text_chunks:
            raise ValueError("所有待嵌入文本均为空字符串！")

        print('start embedding ================================')
        MAX_BATCH_SIZE = 25  # provider batch limit per request
        results: List[List[float]] = []

        # Ollama / OpenAI path (both speak the OpenAI embeddings API).
        if self.embedding_provider in ("ollama", "openai"):
            emb_model = self.ollama_emb_model if self.embedding_provider == "ollama" else (model or "text-embedding-3-small")
            for i in range(0, len(text_chunks), MAX_BATCH_SIZE):
                batch = text_chunks[i:i+MAX_BATCH_SIZE]
                resp = self.llm.embeddings.create(input=batch, model=emb_model)
                results.extend([item.embedding for item in resp.data])
            return results

        # DashScope path; empty embeddings are logged to a file before raising
        # so the offending text can be inspected.
        LOG_FILE = 'embedding_error.log'
        for i in range(0, len(text_chunks), MAX_BATCH_SIZE):
            batch = text_chunks[i:i+MAX_BATCH_SIZE]
            resp = TextEmbedding.call(model=TextEmbedding.Models.text_embedding_v1, input=batch)
            if 'output' in resp and 'embeddings' in resp['output']:
                # Batch response: one embedding record per input text.
                for emb in resp['output']['embeddings']:
                    vec = emb.get('embedding')
                    if not vec:
                        idx = getattr(emb, 'text_index', None)
                        error_text = batch[idx] if isinstance(idx, int) and idx < len(batch) else None
                        with open(LOG_FILE, 'a', encoding='utf-8') as f:
                            f.write(f"DashScope返回的embedding为空，text_index={idx}，文本内容如下：\n{error_text}\n{'-'*60}\n")
                        raise RuntimeError(f"DashScope返回的embedding为空，text_index={idx}，文本内容已写入 {LOG_FILE}")
                    results.append(vec)
            elif 'output' in resp and 'embedding' in resp['output']:
                # Single-text response shape.
                vec = resp['output']['embedding']
                if not vec:
                    with open(LOG_FILE, 'a', encoding='utf-8') as f:
                        f.write("DashScope返回的embedding为空，文本内容如下：\n{}\n{}\n".format(batch[0] if batch else None, '-'*60))
                    raise RuntimeError("DashScope返回的embedding为空，文本内容已写入 {}".format(LOG_FILE))
                results.append(vec)
            else:
                raise RuntimeError(f"DashScope embedding API返回格式异常: {resp}")
        return results

    def _create_vector_db(self, embeddings: List[List[float]]):
        """Build a FAISS inner-product index from a list of embedding vectors.

        NOTE(review): IndexFlatIP computes raw inner product; this equals
        cosine similarity only if the provider returns L2-normalized vectors —
        confirm for the configured embedding model.
        """
        embeddings_array = np.array(embeddings, dtype=np.float32)
        dimension = len(embeddings[0])
        index = faiss.IndexFlatIP(dimension)
        index.add(embeddings_array)
        return index

    def _process_report(self, report: dict):
        """Extract text chunks from one report dict and return a FAISS index."""
        text_chunks = [chunk['text'] for chunk in report['content']['chunks']]
        # Drop whitespace-only chunks (matching _get_embeddings' own filter so
        # chunk and vector counts stay aligned); truncate to 2048 chars to
        # respect provider input limits.
        max_len = 2048
        text_chunks = [t[:max_len] for t in text_chunks if t.strip()]
        embeddings = self._get_embeddings(text_chunks)
        index = self._create_vector_db(embeddings)
        return index

    def process_reports(self, all_reports_dir: Path, output_dir: Path):
        """Build and save a FAISS index for every JSON report in a directory.

        Args:
            all_reports_dir (Path): directory containing the JSON reports
            output_dir (Path): directory where .faiss files are written
        """
        all_report_paths = list(all_reports_dir.glob("*.json"))
        output_dir.mkdir(parents=True, exist_ok=True)

        for report_path in tqdm(all_report_paths, desc="Processing reports for FAISS"):
            # Load one report.
            with open(report_path, 'r', encoding='utf-8') as f:
                report_data = json.load(f)
            index = self._process_report(report_data)
            # Use metainfo['sha1'] as the file name to avoid non-ASCII and
            # special characters in paths.
            sha1 = report_data["metainfo"].get("sha1", "")
            if not sha1:
                # Missing sha1: derive one deterministically from the file stem
                # (hashlib is imported at module level).
                file_stem = report_path.stem
                sha1 = hashlib.sha1(file_stem.encode('utf-8')).hexdigest()
                print(f"警告：{report_path} 缺少 sha1 字段，自动生成: {sha1}")
            faiss_file_path = output_dir / f"{sha1}.faiss"
            faiss.write_index(index, str(faiss_file_path))

        print(f"Processed {len(all_report_paths)} reports")