import json
import requests
import hashlib
from typing import Generator, List, Optional
from sqlalchemy.orm import Session
from datetime import datetime
from config import model_config, embedding_model, HEADERS
from database.database import Conversation, Message, Document, VectorEmbedding

# Chat-completions endpoint taken from the shared model configuration.
API_URL = model_config["url"]

# In-process cancellation flags keyed by conversation_id.
# NOTE(review): per-process only — not shared across workers/processes.
CANCEL_FLAGS = {}


def mark_cancel(conversation_id: int):
    """Flag a conversation so its in-flight stream stops at the next chunk check."""
    CANCEL_FLAGS.update({conversation_id: True})


def is_canceled(conversation_id: int) -> bool:
    """Return True only when a cancel flag of exactly True is set for the conversation."""
    flag = CANCEL_FLAGS.get(conversation_id)
    return flag is True


def clear_cancel(conversation_id: int):
    """Remove the cancel flag for a conversation, if present.

    Uses dict.pop with a default instead of check-then-delete: a single
    lookup, and no KeyError if another caller removes the key first.
    """
    CANCEL_FLAGS.pop(conversation_id, None)


def parse_stream_chunk(chunk: str) -> Optional[str]:
    """Parse a single SSE chunk from a streaming chat-completions response.

    Args:
        chunk: One line of the stream, as str or bytes; may carry the
            SSE "data: " prefix.

    Returns:
        The delta content string ("" when the delta carries no content),
        or None for the [DONE] sentinel, malformed JSON, or a chunk with
        no choices. (The annotation was previously `str`, but None is a
        normal return value on several paths.)
    """
    try:
        # Normalize bytes to str; replace invalid UTF-8 instead of raising.
        if isinstance(chunk, bytes):
            chunk = chunk.decode('utf-8', errors='replace')

        # Strip the SSE "data: " prefix when present.
        if chunk.startswith("data: "):
            chunk = chunk[6:]
        # End-of-stream sentinel.
        if chunk.strip() == "[DONE]":
            return None
        # Parse the JSON payload.
        data = json.loads(chunk)
        # Extract the delta content.
        if data.get("choices") and len(data["choices"]) > 0:
            content = data["choices"][0].get("delta", {}).get("content", "")
            if isinstance(content, str):
                return content
            elif content is not None:
                # Coerce unexpected non-string payloads defensively.
                return str(content)
            return ""
    except json.JSONDecodeError:
        return None
    except Exception:
        # Best-effort parser: any other malformed chunk yields None.
        return None
    return None


def stream_response(question: str, conversation_id: int, db: Session, selected_model_name: str = None, system_prompt_id: Optional[int] = None, kb_id: Optional[int] = None, top_k: int = 6, threshold: float = 0.0, include_citations: bool = True, user_id: int = None) -> Generator[str, None, None]:
    """Stream a chat completion as SSE events and persist the exchange.

    Creates or loads the conversation, stores the user message, optionally
    injects a system prompt enriched with RAG context recalled from a
    knowledge base, proxies the upstream streaming chat API, and stores the
    assistant reply. The in-process cancel flag (see CANCEL_FLAGS) is checked
    between stream chunks so a cancel request ends the stream early while
    still saving any partial reply.

    Args:
        question: The user's new message.
        conversation_id: Existing conversation ID; None/0 creates a new one.
        db: Active SQLAlchemy session.
        selected_model_name: Chat model to use for this turn.
        system_prompt_id: Optional system prompt ID; falls back to the one
            saved on the conversation.
        kb_id: Optional knowledge-base ID for RAG recall; falls back to the
            conversation's saved kb_id.
        top_k: Maximum number of recalled chunks.
        threshold: Minimum similarity score for recalled chunks.
        include_citations: Whether to attach recall hits to the init event
            and the saved assistant message.
        user_id: Owner of the conversation (used for access control).

    Yields:
        SSE-formatted strings: an init event carrying the conversation_id
        (plus "recall" hits when available), per-chunk content events,
        error events, and a final "data: [DONE]" marker.
    """
    try:
        print(f"开始处理问题: {question}, 会话ID: {conversation_id}, 选择的模型: {selected_model_name}, 提示词ID: {system_prompt_id}, 用户ID: {user_id}")
        
        # Determine which model name to use
        use_model_name = selected_model_name
        print(f"实际使用的模型: {use_model_name}")
        
        # Create or fetch the conversation
        if conversation_id is None or conversation_id == 0:
            # Create a new conversation
            print("创建新会话")
            conversation = Conversation(
                title=question[:50] + "..." if len(question) > 50 else question,
                model_name=use_model_name,
                system_prompt_id=system_prompt_id,
                kb_id=kb_id,  # persist the knowledge-base ID
                user_id=user_id  # persist the owning user ID
            )
            db.add(conversation)
            db.commit()
            db.refresh(conversation)
            conversation_id = conversation.conversation_id
            print(f"新会话创建成功，ID: {conversation_id}")
        else:
            # Load the existing conversation
            print(f"查找现有会话，ID: {conversation_id}")
            conversation = db.query(Conversation).filter(
                Conversation.conversation_id == conversation_id,
                Conversation.user_id == user_id  # users may only access their own conversations
            ).first()
            if not conversation:
                print(f"会话不存在或无权限访问，ID: {conversation_id}")
                yield f"data: {json.dumps({'error': f'会话ID {conversation_id} 不存在或无权限访问'})}\n\n"
                return
            print(f"找到现有会话，ID: {conversation_id}")
        
        # Clear any stale cancel flag before starting this turn
        clear_cancel(conversation_id)
        
        # Persist the user message
        print("保存用户消息")
        user_message_count = db.query(Message).filter(Message.conversation_id == conversation_id).count()
        print(f"当前会话消息数量: {user_message_count}")
        user_message = Message(
            conversation_id=conversation_id,
            sender_type=0,  # 0 = user
            content=question,
            sequence=user_message_count + 1
        )
        db.add(user_message)
        db.commit()
        print("用户消息保存成功")

        # Prepare the init event immediately so the frontend EventSource
        # receives its first bytes as soon as possible.
        # NOTE(review): both branches are identical — this try/except looks
        # vestigial and could be collapsed to a single assignment.
        try:
            init_event = {"conversation_id": conversation_id}
        except Exception:
            init_event = {"conversation_id": conversation_id}
        
        # Fetch the conversation history
        messages = db.query(Message).filter(Message.conversation_id == conversation_id).order_by(Message.sequence).all()
        
        # Build the chat history payload
        conversation_history = []
        # System-prompt handling and RAG context injection
        citations_payload = None
        # If no system_prompt_id was passed this turn, fall back to the one saved
        # on the conversation so every turn gets the system prompt and RAG.
        effective_system_prompt_id = system_prompt_id or getattr(conversation, 'system_prompt_id', None)
        # Likewise fall back to the conversation's saved kb_id to keep
        # knowledge-base recall continuous across turns.
        effective_kb_id = kb_id or getattr(conversation, 'kb_id', None)
        if effective_system_prompt_id:
            try:
                from database.database import SystemPrompt
                sp = db.query(SystemPrompt).filter(SystemPrompt.system_prompt_id == effective_system_prompt_id).first()
                sys_content = sp.content if sp and sp.content else ""
                placeholder = (sp.context_placeholder or '[CONTEXT]') if sp else '[CONTEXT]'
                enable_ctx = bool(getattr(sp, 'enable_context', False)) if sp else False
                if enable_ctx and effective_kb_id:
                    # Prefer the knowledge base's default embedding model for recall.
                    # NOTE(review): kb_embed_model is computed but never used —
                    # recall relies on search_similar_vectors' own model inference.
                    try:
                        from database.database import KnowledgeBase
                        kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == effective_kb_id).first()
                        kb_embed_model = getattr(kb, 'embedding_model_name', None) if kb else None
                    except Exception:
                        kb_embed_model = None
                    # Recall similar chunks from the knowledge base
                    hits = search_similar_vectors(question, kb_id=effective_kb_id, top_k=top_k, db=db, threshold=threshold)
                    # Enrich hits with their document names
                    enriched = []
                    for h in hits:
                        info = get_document_info(h['doc_id'], db)
                        enriched.append({
                            **h,
                            'doc_name': info['doc_name'] if info else None
                        })
                    # Assemble the context text
                    ctx_lines = []
                    for idx, h in enumerate(enriched, 1):
                        score_pct = f"{h['similarity']*100:.1f}%"
                        title = h.get('doc_name') or f"文档#{h['doc_id']}"
                        ctx_lines.append(f"[{idx}] {title} (相似度 {score_pct})\n{h['chunk_text']}")
                    context_text = "\n\n".join(ctx_lines) if ctx_lines else ""
                    # Substitute the placeholder, or append the context at the tail
                    if placeholder and placeholder in sys_content:
                        sys_content = sys_content.replace(placeholder, context_text)
                    elif context_text:
                        sys_content = (sys_content + "\n\n" + context_text).strip()
                    if include_citations:
                        citations_payload = { 'hits': [
                            {
                                'doc_id': h['doc_id'],
                                'doc_name': h.get('doc_name'),
                                'chunk_index': h['chunk_index'],
                                'chunk_text': h['chunk_text'],
                                'score': h['similarity']
                            } for h in enriched
                        ]}
                if sys_content:
                    conversation_history.append({"role": "system", "content": sys_content})
            except Exception as _:
                # Best-effort: system-prompt/RAG failures must not break the chat.
                pass
        for msg in messages:
            role = "user" if msg.sender_type == 0 else "assistant"
            conversation_history.append({"role": role, "content": msg.content})
        
        # Append the current question
        conversation_history.append({"role": "user", "content": question})
        
        # Attach citations (if any) to the first event, then emit it
        try:
            if citations_payload is not None:
                init_event["recall"] = citations_payload
            yield f"data: {json.dumps(init_event)}\n\n"
        except Exception as init_err:
            print(f"发送初始化事件失败: {init_err}")

        # Build the upstream request payload
        json_data = {
            "model": use_model_name,
            "messages": conversation_history,
            "stream": True
        }
        
        # Send the request, making sure encodings are handled correctly
        response = requests.post(
            url=API_URL,
            headers=HEADERS,
            json=json_data,
            stream=True,
            timeout=model_config["timeout"]  # request timeout from config
        )
        response.raise_for_status()  # raise on HTTP error status
        
        # Normalize the response encoding
        if response.encoding != 'utf-8':
            response.encoding = 'utf-8'
        
        # Accumulate the assistant reply
        ai_content = ""
        
        # Stream the response (small chunk_size speeds up first byte and iteration frequency)
        for line in response.iter_lines(decode_unicode=True, chunk_size=1):
            # Cancel check: on a cancel signal, close the upstream connection and end the stream
            if is_canceled(conversation_id):
                print(f"检测到会话 {conversation_id} 取消，关闭上游连接并结束流")
                try:
                    response.close()
                except Exception:
                    pass
                # Save whatever partial content was generated before cancellation
                try:
                    if ai_content:
                        if not isinstance(ai_content, str):
                            ai_content = str(ai_content)
                        ai_content.encode('utf-8').decode('utf-8')
                        ai_message = Message(
                            conversation_id=conversation_id,
                            sender_type=1,
                            content=ai_content,
                            sequence=user_message_count + 2,
                            citations=citations_payload.get('hits') if citations_payload else None
                        )
                        db.add(ai_message)
                        conversation.last_update_time = datetime.utcnow()
                        db.commit()
                        print("已保存取消时的部分AI内容")
                except Exception as save_err:
                    print(f"取消保存部分内容失败: {save_err}")
                clear_cancel(conversation_id)
                yield "data: [DONE]\n\n"
                return
            if line:
                content = parse_stream_chunk(line)
                if content:
                    ai_content += content
                    # Emit the chunk in SSE format
                    yield f"data: {json.dumps({'content': content, 'conversation_id': conversation_id})}\n\n"
        
        # Save the assistant reply, ensuring the content is correctly encoded
        if ai_content:
            # Validate and sanitize the AI content
            try:
                # Ensure ai_content is a str
                if not isinstance(ai_content, str):
                    ai_content = str(ai_content)
                
                # Verify the text round-trips as valid UTF-8
                ai_content.encode('utf-8').decode('utf-8')
                
                print(f"AI回复内容长度: {len(ai_content)}")
                print(f"AI回复内容预览: {ai_content[:100]}...")
                
                ai_message = Message(
                    conversation_id=conversation_id,
                    sender_type=1,  # 1 = AI
                    content=ai_content,
                    sequence=user_message_count + 2,
                    citations=citations_payload.get('hits') if citations_payload else None
                )
                db.add(ai_message)
                
                # Touch the conversation's last-update timestamp
                conversation.last_update_time = datetime.utcnow()
                db.commit()
                print("AI回复保存成功")
                
            except UnicodeError as e:
                print(f"编码错误: {e}")
                print(f"原始内容: {repr(ai_content)}")
                # Attempt to repair the encoding
                try:
                    if isinstance(ai_content, bytes):
                        ai_content = ai_content.decode('utf-8', errors='replace')
                    else:
                        ai_content = ai_content.encode('utf-8', errors='replace').decode('utf-8')
                    
                    ai_message = Message(
                        conversation_id=conversation_id,
                        sender_type=1,  # 1 = AI
                        content=ai_content,
                        sequence=user_message_count + 2,
                        citations=citations_payload.get('hits') if citations_payload else None
                    )
                    db.add(ai_message)
                    conversation.last_update_time = datetime.utcnow()
                    db.commit()
                    print("AI回复保存成功（编码修复后）")
                except Exception as fix_error:
                    print(f"编码修复失败: {fix_error}")
                    raise
        
        # Send the end-of-stream marker
        yield "data: [DONE]\n\n"
        clear_cancel(conversation_id)
        
    except requests.RequestException as e:
        error_msg = f'请求错误: {str(e)}'
        print(f"请求异常: {error_msg}")
        yield f"data: {json.dumps({'error': error_msg})}\n\n"
    except Exception as e:
        error_msg = f'处理错误: {str(e)}'
        print(f"处理异常: {error_msg}")
        import traceback
        print(f"异常堆栈: {traceback.format_exc()}")
        yield f"data: {json.dumps({'error': error_msg})}\n\n"


def generate_doc_hash(doc_file: str) -> str:
    """Derive a stable SHA-256 hex digest from the document path string,
    used as the document's unique identifier."""
    digest = hashlib.sha256()
    digest.update(doc_file.encode('utf-8'))
    return digest.hexdigest()


# 支持可选 kb_id 的文档创建/获取

def get_or_create_document(doc_file: str, db: Session, kb_id: int = None) -> int:
    """Get or create the Document row for a file and return its primary key.

    Documents are deduplicated per knowledge base on (kb_id, doc_hash).

    Args:
        doc_file: Path of the source file (hashed for identity, and used
            to derive the display name).
        db: Active SQLAlchemy session.
        kb_id: Optional knowledge-base ID the document belongs to.

    Returns:
        The document's database ID (existing or newly created).
    """
    doc_hash = generate_doc_hash(doc_file)
    # Extract the file name; normalize backslashes first so both Windows
    # and POSIX style paths work (the previous split('\\') returned the
    # entire path as the name for POSIX paths).
    doc_name = doc_file.replace('\\', '/').split('/')[-1]

    # Check whether the document already exists (unique on (kb_id, doc_hash))
    existing_doc = db.query(Document).filter(
        Document.doc_hash == doc_hash,
        Document.kb_id == kb_id
    ).first()
    if existing_doc:
        print(f"文档已存在: {existing_doc.doc_name} (ID: {existing_doc.id})")
        return existing_doc.id

    # Create a new document record
    new_doc = Document(doc_name=doc_name, doc_hash=doc_hash, kb_id=kb_id)
    db.add(new_doc)
    db.commit()
    db.refresh(new_doc)
    print(f"创建新文档: {new_doc.doc_name} (ID: {new_doc.id})")
    return new_doc.id


def split_into_chunks(doc_file: str, mode: str = "paragraph", params: Optional[dict] = None) -> List[str]:
    """Split a UTF-8 text file into chunks.

    Args:
        doc_file: Path of the text file to read (UTF-8).
        mode: One of "paragraph" (default, blank-line separated),
            "sentence" (split on CJK/ASCII sentence enders),
            "length" (fixed-size sliding window), or
            "delimiter" (custom separator).
        params: Mode-specific options:
            - delimiter: custom separator; escape sequences such as "\\n"
              and "\\n\\n" are interpreted.
            - chunk_size: chunk length in characters (length mode).
            - overlap: sliding-window overlap in characters (length mode).

    Returns:
        Non-empty, stripped text chunks.
    """
    params = params or {}
    with open(doc_file, 'r', encoding='UTF-8') as f:
        content = f.read()

    if mode == "delimiter":
        delimiter = params.get("delimiter", "\n\n")
        # Interpret user-typed escapes such as \n or \n\n. Going through
        # latin-1 + backslashreplace (instead of utf-8) keeps non-ASCII
        # delimiters like "。" intact, which the previous
        # utf-8 -> unicode_escape round trip mangled into mojibake.
        delimiter = delimiter.encode('latin-1', 'backslashreplace').decode('unicode_escape')
        return [c.strip() for c in content.split(delimiter) if c.strip()]

    if mode == "length":
        size = int(params.get("chunk_size", 500))
        overlap = int(params.get("overlap", 0))
        chunks: List[str] = []
        i = 0
        n = len(content)
        while i < n:
            end = min(i + size, n)
            chunk = content[i:end].strip()
            if chunk:
                chunks.append(chunk)
            if end == n:
                break
            # Slide the window; ignore overlap when overlap >= size so the
            # loop always advances.
            i = end - overlap if overlap < size else end
        return chunks

    if mode == "sentence":
        # Split on sentence-ending punctuation, then re-attach each
        # punctuation mark to its preceding sentence.
        import re
        parts = re.split(r"([。！？!?])", content)
        sentences: List[str] = []
        for i in range(0, len(parts), 2):
            s = parts[i].strip()
            p = parts[i + 1] if i + 1 < len(parts) else ""
            if s:
                sentences.append((s + p).strip())
        return [s for s in sentences if s]

    # Default: paragraph mode — split on blank lines.
    return [chunk.strip() for chunk in content.split("\n\n") if chunk.strip()]


def embed_chunk(chunk: str, embedding_model_name: Optional[str] = None) -> List[float]:
    """Return the embedding vector for a text chunk via the embedding API.

    Args:
        chunk: Text to embed.
        embedding_model_name: Name of the embedding model; when omitted the
            request is skipped.

    Returns:
        The embedding as a list of floats, or [] on any failure (missing
        model name, request error, or malformed response).
    """
    if not embedding_model_name:
        print("嵌入请求跳过：未指定嵌入模型名称")
        return []
    try:
        resp = requests.post(
            embedding_model["url"],
            json={"model": embedding_model_name, "input": chunk},
            headers=HEADERS,
        )
        resp.raise_for_status()
        data = resp.json().get("data") or []
        if not isinstance(data, list) or not data:
            print("嵌入响应为空或格式异常")
            return []
        vector = data[0].get("embedding")
        if not vector:
            print("嵌入结果缺失embedding字段")
            return []
        return vector
    except requests.exceptions.RequestException as exc:
        print(f"嵌入请求失败: {exc}")
        return []
    except Exception as exc:
        print(f"嵌入解析失败: {exc}")
        return []


def store_embeddings_to_db_batch(doc_files: List[str], db: Session = None, kb_id: int = None,
                                 embedding_model_name: Optional[str] = None,
                                 chunking: Optional[dict] = None) -> List[int]:
    """Batch-store vector embeddings for documents.

    For each file: gets/creates its Document row (scoped to kb_id), splits
    it into chunks, deletes any previous embeddings for that document,
    embeds each chunk, and records the embedding model and chunking metadata
    on the document. Commits once after all files; rolls back on error.

    Args:
        doc_files: Paths of the text files to ingest.
        db: Active SQLAlchemy session.
            NOTE(review): defaults to None but is required — passing None
            fails at first use.
        kb_id: Knowledge-base ID the documents belong to.
        embedding_model_name: Model used to embed each chunk.
        chunking: Optional {"mode": ..., "params": ...} chunking spec.

    Returns:
        The document IDs processed, in input order.
    """
    # NOTE(review): should_close_db is never set to True, so the finally
    # branch below is currently dead code.
    should_close_db = False

    try:
        doc_ids = []
        total_chunks = 0
        chunk_mode = None
        chunk_params = None
        if chunking:
            chunk_mode = chunking.get("mode")
            chunk_params = chunking.get("params")

        # Process each document
        for doc_file in doc_files:
            print(f"\n处理文档: {doc_file}")

            # Get or create the document record (with kb_id)
            doc_id = get_or_create_document(doc_file, db, kb_id=kb_id)
            doc_ids.append(doc_id)

            # Split the document into chunks
            chunks = split_into_chunks(doc_file, mode=chunk_mode or "paragraph", params=chunk_params)
            chunk_count = len(chunks)
            total_chunks += chunk_count
            print(f"文档切分成 {chunk_count} 个块")

            # Delete this document's old embeddings, if any
            db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc_id).delete()

            # Update the document's metadata
            doc_row = db.query(Document).filter(Document.id == doc_id).first()
            if doc_row:
                doc_row.embedding_model_name = embedding_model_name
                doc_row.chunk_mode = chunk_mode or "paragraph"
                doc_row.chunk_params = chunk_params or {}

            # Generate and store a vector embedding for each chunk
            success_count = 0
            for i, chunk in enumerate(chunks):
                try:
                    embedding = embed_chunk(chunk, embedding_model_name)

                    # Create the vector-embedding record
                    vector_record = VectorEmbedding(
                        doc_id=doc_id,
                        chunk_index=i,
                        chunk_text=chunk,
                        embedding=embedding
                    )
                    db.add(vector_record)
                    success_count += 1

                    # Print progress every 10 chunks (and on the last one)
                    if (i + 1) % 10 == 0 or (i + 1) == chunk_count:
                        print(f"  已处理 {i + 1}/{chunk_count} 个块")

                except Exception as e:
                    # Per-chunk failures are logged and skipped, not fatal
                    print(f"  处理第 {i + 1} 个块时出错: {e}")
                    continue

            print(f"文档 {doc_file} 处理完成: {success_count}/{chunk_count} 个块成功")

        db.commit()
        print(f"\n批量处理完成: {len(doc_files)} 个文档, {total_chunks} 个块")
        return doc_ids

    except Exception as e:
        db.rollback()
        raise
    finally:
        if should_close_db:
            db.close()


def search_similar_vectors(query_text: str, doc_id: int = None, kb_id: int = None, top_k: int = 5, db: Session = None, threshold: float = None) -> List[dict]:
    """Search stored chunk embeddings most similar to the query text.

    Scope is either a single document (doc_id) or a whole knowledge base
    (kb_id, used when doc_id is falsy); disabled documents are excluded.
    The embedding model for the query is inferred from the target
    document(s).

    Args:
        query_text: Text to embed and compare against stored chunks.
        doc_id: Restrict search to one document.
        kb_id: Restrict search to one knowledge base.
        top_k: Maximum number of hits to return.
        db: Active SQLAlchemy session.
            NOTE(review): defaults to None but is required — passing None
            fails at first use.
        threshold: Optional minimum similarity score.

    Returns:
        Hits sorted by descending similarity, each containing doc_id,
        chunk_index, chunk_text, similarity. Empty list when nothing can
        be recalled.
    """
    # NOTE(review): should_close_db is never set to True, so the finally
    # branch below is currently dead code.
    should_close_db = False

    try:
        # Embedding model to use for the query (inferred below)
        embedding_model_name: Optional[str] = None

        # Build the base query
        query = db.query(VectorEmbedding)
        if doc_id:
            # If the specified document is missing or disabled, return nothing
            doc_obj = db.query(Document).filter(Document.id == doc_id).first()
            if not doc_obj or not getattr(doc_obj, 'is_enabled', True):
                return []
            embedding_model_name = getattr(doc_obj, 'embedding_model_name', None)
            query = query.filter(VectorEmbedding.doc_id == doc_id)
        elif kb_id:
            # Filter to documents belonging to this knowledge base,
            # selecting only enabled documents
            try:
                doc_ids = [d.id for d in db.query(Document.id).filter(Document.kb_id == kb_id, Document.is_enabled == True).all()]
            except Exception:
                # Older schemas without the is_enabled column: fall back to all documents
                doc_ids = [d.id for d in db.query(Document.id).filter(Document.kb_id == kb_id).all()]
            if not doc_ids:
                return []
            query = query.filter(VectorEmbedding.doc_id.in_(doc_ids))
            # Infer the embedding model from any document in the knowledge base
            try:
                any_doc = db.query(Document).filter(Document.id.in_(doc_ids), Document.embedding_model_name.isnot(None)).order_by(Document.updated_at.desc()).first()
                if any_doc:
                    embedding_model_name = getattr(any_doc, 'embedding_model_name', None)
            except Exception:
                pass

        # Embed the query text (returns [] when no model is known, avoiding an error)
        query_embedding = embed_chunk(query_text, embedding_model_name)
        if not query_embedding or not isinstance(query_embedding, list):
            return []

        # Fetch all candidate vector records
        vectors = query.all()

        # Score candidates with simple cosine similarity
        results = []
        for vector in vectors:
            similarity = cosine_similarity(query_embedding, vector.embedding)
            # Drop zero-similarity results
            if similarity and similarity > 0:
                results.append({
                    'doc_id': vector.doc_id,
                    'chunk_index': vector.chunk_index,
                    'chunk_text': vector.chunk_text,
                    'similarity': similarity
                })

        # Threshold filtering
        if threshold is not None:
            try:
                thr = float(threshold)
                results = [r for r in results if r['similarity'] >= thr]
            except Exception:
                pass
        results.sort(key=lambda x: x['similarity'], reverse=True)
        return results[:top_k]

    except Exception as e:
        print(f"搜索相似向量失败: {e}")
        raise
    finally:
        if should_close_db:
            db.close()


def cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
    """Compute the cosine similarity between two vectors.

    Returns 0.0 for None inputs, zero-magnitude vectors, or any
    computation error.
    """
    try:
        if vec1 is None or vec2 is None:
            return 0.0
        import math
        dot = sum(x * y for x, y in zip(vec1, vec2))
        norm_a = math.sqrt(sum(x * x for x in vec1))
        norm_b = math.sqrt(sum(y * y for y in vec2))
        if not norm_a or not norm_b:
            return 0.0
        return dot / (norm_a * norm_b)
    except Exception as exc:
        print(f"计算余弦相似度失败: {exc}")
        return 0.0


def get_document_info(doc_id: int, db: Session = None) -> dict:
    """Fetch a document's metadata plus its stored embedding count.

    Returns a dict with id, doc_name, doc_hash, created_at, updated_at,
    vector_count, kb_id and is_enabled, or None when the document does
    not exist or the query fails.
    """
    should_close_db = False

    try:
        document = db.query(Document).filter(Document.id == doc_id).first()
        if document is None:
            return None

        # Count the embeddings stored for this document
        embedding_total = db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc_id).count()

        return {
            'id': document.id,
            'doc_name': document.doc_name,
            'doc_hash': document.doc_hash,
            'created_at': document.created_at,
            'updated_at': document.updated_at,
            'vector_count': embedding_total,
            'kb_id': document.kb_id,
            'is_enabled': getattr(document, 'is_enabled', True),
        }

    except Exception as exc:
        print(f"获取文档信息失败: {exc}")
        return None
    finally:
        if should_close_db:
            db.close()