import logging
import warnings
from rank_bm25 import BM25Okapi
import jieba
from transformers import AutoTokenizer

# 配置日志和警告
warnings.filterwarnings("ignore", category=UserWarning)
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class TokenCounter:
    """Count tokens in text using a locally stored HuggingFace tokenizer."""

    def __init__(self, llm_local_dir):
        """Load the tokenizer from a local model/tokenizer directory.

        :param llm_local_dir: filesystem path to the local model files
        """
        self.llm_local_dir = llm_local_dir
        # Loaded once here so every token_count() call reuses the same tokenizer.
        self.tokenizer = AutoTokenizer.from_pretrained(self.llm_local_dir)

    def token_count(self, text):
        """Return the number of tokens in *text*, excluding special tokens."""
        encoded = self.tokenizer.encode(text, add_special_tokens=False)
        return len(encoded)


### 修改chunk结构以适配新输入
def file_chunker(spliler, session):
    """Split the session's template and material files into chunk dicts.

    :param spliler: text splitter exposing ``split_text(content) -> list[str]``
    :param session: payload with ``data.templateFiles`` and ``data.materialFiles``,
        each entry carrying ``fileId``, ``title`` and ``content``
    :return: tuple ``(template_chunks, reference_chunks)`` where each chunk is a
        dict with ``fileid``, ``title``, ``chunkid`` and ``content`` keys
    """
    def _chunk_files(files):
        # Shared routine: both file lists are chunked with identical structure.
        chunks = []
        for entry in files:
            pieces = spliler.split_text(entry["content"])
            chunks.extend(
                {
                    "fileid": entry["fileId"],
                    "title": entry["title"],
                    "chunkid": piece_idx,
                    "content": piece,
                }
                for piece_idx, piece in enumerate(pieces)
            )
        return chunks

    template_chunks = _chunk_files(session["data"]["templateFiles"])
    reference_chunks = _chunk_files(session["data"]["materialFiles"])
    logger.info("模板分块完毕，参考文献分块完毕")
    return template_chunks, reference_chunks


"""
    将已排序的分块按token限制分组批处理
    核心目标：保证每批的token总数不超过模型输入限制
    特殊处理：同一文件的分块尽量不分到不同批次

    分块数据结构：
    {
        "fileid": "file1",       # 文件唯一标识
        "chunkid": 0,            # 分块序号
        "title": "AI白皮书",     # 文档标题
        "content": "深度学习...", # 原始内容
        "claim": "要点1：...",   # 提取的要点
        "claim_token_num": 150   # 要点token数（含标题）
    }
"""


def batch_chunks(chunks, max_tokens_per_batch):
    """
    Group pre-ordered chunks into batches whose total token count does not
    exceed ``max_tokens_per_batch``.

    Parameters:
    chunks (list): chunk dicts; each must carry a ``claim_token_num`` field
        with the token count used for budgeting. (The previous docstring
        claimed the field was named ``token_num``, which did not match the
        code at all — fixed here.)
    max_tokens_per_batch (int): maximum total token count per batch.

    Returns:
    list: list of batches, each a list of chunk dicts, preserving input
        order. A single chunk whose token count alone exceeds the budget is
        still emitted as its own (over-limit) batch rather than dropped.
    """
    if not chunks:
        return []

    batches = []
    current_batch = []
    current_tokens = 0

    for chunk in chunks:
        chunk_tokens = chunk['claim_token_num']

        # Adding this chunk would blow the budget: flush and start a new batch.
        if current_tokens + chunk_tokens > max_tokens_per_batch:
            if current_batch:  # guard: never append an empty batch
                batches.append(current_batch)
            current_batch = [chunk]
            current_tokens = chunk_tokens
        else:
            current_batch.append(chunk)
            current_tokens += chunk_tokens

    # Flush the trailing batch.
    if current_batch:
        batches.append(current_batch)

    # Result shape: [[chunk1, chunk2, ...], [chunk3, chunk4, ...], ...]
    return batches

class BM25ChunkMatcher:
    """Rank document chunks against a free-text opinion using BM25."""

    def __init__(self, chunks):
        """
        Build the BM25 index over the given chunks.

        :param chunks: list of dicts, each with ``fileid``, ``title``,
            ``chunkid`` and ``content`` keys
        """
        self.chunks = chunks
        self.chunk_contents = [c["content"] for c in chunks]
        self.tokenized_chunks = self._tokenize_chunks()
        self.bm25 = BM25Okapi(self.tokenized_chunks)

    def _tokenize(self, text):
        """Segment *text* with jieba (handles Chinese word boundaries)."""
        return list(jieba.cut(text))

    def _tokenize_chunks(self):
        """Tokenize every stored chunk body."""
        return [self._tokenize(body) for body in self.chunk_contents]

    def match_opinion_to_chunks(self, opinion, top_n=3):
        """
        Return the ``top_n`` chunks most relevant to *opinion*.

        :param opinion: query text to match
        :param top_n: number of top-scoring chunks to return
        :return: list of dicts with ``fileid``, ``title``, ``chunkid``,
            ``content``, ``score`` and ``rank`` (1-based, best first)
        """
        scores = self.bm25.get_scores(self._tokenize(opinion))
        # Indices of the best-scoring chunks, highest score first.
        best_indices = scores.argsort()[::-1][:top_n]

        results = []
        for rank, idx in enumerate(best_indices, start=1):
            hit = self.chunks[idx]
            results.append({
                "fileid": hit["fileid"],
                "title": hit["title"],
                "chunkid": hit["chunkid"],
                "content": hit["content"],
                "score": scores[idx],
                "rank": rank,
            })

        return results

def match_claim_chunk(claim, chunks, rank=0):
    """Match *claim* against *chunks* via BM25 and return the best hits.

    :param claim: claim text to look up
    :param chunks: chunk dicts accepted by :class:`BM25ChunkMatcher`
    :param rank: when >= 0, keep only the top ``rank + 1`` matches;
        a negative value returns every match
    :return: list of match dicts, or ``None`` when nothing matched
    """
    candidates = BM25ChunkMatcher(chunks).match_opinion_to_chunks(claim)
    if not candidates:
        return None
    return candidates[:rank + 1] if rank >= 0 else candidates

if __name__ == "__main__":
    # Smoke test: count tokens in a sample sentence with a local tokenizer.
    # Replace with the path to your own local model directory.
    # model_dir = "/data/jinke/workspace/models/qwen3_14b_org"
    model_dir = "D:\\workspaces\\models\\Qwen2.5-7B-Instruct"
    counter = TokenCounter(model_dir)

    test_text = "采购管理存在系统性漏洞，未建立公司级供应商管理体系，存在超出供应商经营范围采购行为，集中采购执行部门缺失"

    count = counter.token_count(test_text)
    print(f"文本的token数量为: {count}")

#     r_splitter = RecursiveCharacterTextSplitter(
#         chunk_size=20,
#         chunk_overlap=0, 
#         separators=["\n\n", "\n", " ", ""] #默认
#     )
#     some_text = """When writing documents, writers will use document structure to group content. \
# This can convey to the reader, which idea's are related. For example, closely related ideas \
# are in sentances. Similar ideas are in paragraphs. Paragraphs form a document. \n\n  \
# Paragraphs are often delimited with a carriage return or two carriage returns. \
# Carriage returns are the "backslash n" you see embedded in this string. \
# Sentences have a period at the end, but also, have a space.\
# and words are separated by space."""

#     print(r_splitter.split_text(some_text))
