import os
import tempfile
import argparse
from typing import List, Tuple

# PDF处理
from PyPDF2 import PdfReader

# 向量存储和检索
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings

# Ollama集成
from langchain_community.llms import Ollama

# LangChain组件
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

from flask_cors import CORS  # 导入 CORS
import json
from langchain_ollama import ChatOllama
from flask import Flask, request, Response
from langchain_community.document_loaders.csv_loader import CSVLoader



def extract_text_from_pdf(pdf_path: str) -> str:
    """1. Extract the text content of every page of a PDF file.

    Args:
        pdf_path: Filesystem path to the PDF file.

    Returns:
        The concatenated text of all pages (empty string for pages
        without extractable text).
    """
    pdf_reader = PdfReader(pdf_path)
    # extract_text() may return None for pages with no extractable text
    # (e.g. scanned images); `or ""` prevents a TypeError on concatenation.
    # "".join is O(n) where the old `text += ...` loop was quadratic.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)

def load_csv_data(csv_path: str) -> str:
    """1. Load a CSV file and flatten it into one newline-separated string.

    Each CSV row (one loaded document) becomes one output line; newlines
    inside a row's content are replaced with ", " so that split_text()
    can later treat every line as a single chunk.

    Args:
        csv_path: Filesystem path to the CSV file.

    Returns:
        One string with one CSV row per line.
    """
    loader = CSVLoader(file_path=csv_path)
    documents = loader.load()
    # "\n".join is O(n) (the previous `+=` loop was quadratic) and,
    # unlike the old code, does not prepend a spurious empty first line
    # that would have been embedded as a meaningless chunk downstream.
    return "\n".join(doc.page_content.replace("\n", ", ") for doc in documents)

def split_text(text: str) -> List[str]:
    """2. Split text into chunks, one per non-empty line.

    Args:
        text: The full document text, one logical record per line.

    Returns:
        The list of non-blank lines, in order.
    """
    # Drop empty / whitespace-only lines: embedding them produces
    # meaningless vectors that pollute retrieval (e.g. the leading
    # newline the old load_csv_data() emitted).
    chunks = [chunk for chunk in text.split('\n') if chunk.strip()]
    print(f"分割后的文本数={len(chunks)}")
    for txt in chunks:
        print(txt)
        print("\n\n")
    return chunks


def create_vector_store(text_chunks: List[str]) -> Tuple[Chroma, HuggingFaceEmbeddings]:
    """3. Build a Chroma vector store over the given text chunks.

    Embeds each chunk with a Chinese sentence-embedding model on CPU and
    persists the index into a freshly created temporary directory.

    Args:
        text_chunks: The text chunks to index.

    Returns:
        A (vector store, embedding model) pair.
    """
    embedding_model = HuggingFaceEmbeddings(
        model_name="shibing624/text2vec-base-chinese",  # other models work too
        model_kwargs={'device': 'cpu'},
    )

    # NOTE(review): the temp directory is never cleaned up — acceptable
    # for a throwaway index, but worth confirming for long-running use.
    store = Chroma.from_texts(
        texts=text_chunks,
        embedding=embedding_model,
        persist_directory=tempfile.mkdtemp(),
    )

    return store, embedding_model


def setup_qa_chain(vectordb: Chroma) -> RetrievalQA:
    """Wire the Ollama LLM, a retriever and a custom prompt into a RetrievalQA chain.

    Args:
        vectordb: The populated Chroma vector store to retrieve from.

    Returns:
        A "stuff"-type RetrievalQA chain that also returns source documents.
    """
    # Prompt instructing the model to answer only from the retrieved context.
    prompt = PromptTemplate(
        template="""使用以下上下文来回答最后的问题。如果你不知道答案，只需说你不知道，不要试图编造答案。
        上下文: {context}
        问题: {question}
        回答: """,
        input_variables=["context", "question"],
    )

    # The 3 nearest chunks are stuffed verbatim into the prompt context.
    retriever = vectordb.as_retriever(search_kwargs={"k": 3})

    return RetrievalQA.from_chain_type(
        llm=Ollama(model=MODEL_NAME),  # any model deployed in the local Ollama
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={"prompt": prompt},
    )


app = Flask(__name__)
CORS(app)  # enable cross-origin requests so a browser frontend can call this API

# Name of the model deployed in the local Ollama server.
# MODEL_NAME = "deepseek-r1:8b"
MODEL_NAME="qwen2.5:14b-instruct-q3_K_S"


# --- One-time RAG pipeline setup, executed at import time ---

# PDF extraction path (currently disabled in favour of CSV input).
# pdf_text = extract_text_from_pdf("E:/code/ai.llm/报告/AIGC+ 软件开发新范式.pdf")
# print(f"提取了 {len(pdf_text)} 个字符")

# NOTE(review): hard-coded Windows path — consider making this configurable.
cvs_text = load_csv_data('E:/罗湖项目/tmp.csv')

# Split the flattened CSV text into per-line chunks.
text_chunks = split_text(cvs_text)

# Embed the chunks and build the Chroma vector store.
vectordb, _ = create_vector_store(text_chunks)

# Build the retrieval QA chain used by the /ask endpoint.
qa_chain = setup_qa_chain(vectordb)
# print("问答系统已准备就绪")



# 定义流式响应函数
def generate_response(question):
    try:
        response_str = ''
        for chunk in qa_chain.stream({"query": question}):
            try:
                # 处理不同类型的响应块
                if isinstance(chunk, dict):
                    # 如果chunk是字典，尝试提取内容
                    if 'result' in chunk:
                        content = chunk['result']
                    else:
                        content = chunk.get('content', str(chunk))
                else:
                    # 如果是其他类型，直接转为字符串
                    content = str(chunk)
                
                # 确保换行符被正确处理
                content = content.replace('\\n', '\n')
                content = content.replace('\\r', '\n')
                print(content)
                
                # 累加响应内容
                response_str += content                
                if content:
                    # 使用JSON编码确保特殊字符被正确处理
                    formatted_content = json.dumps({
                        "model": MODEL_NAME,
                        "created_at": "",
                        "message": {
                            "role": "assistant",
                            "content": content
                        },
                        "done": False
                    })
                    yield f"{formatted_content}\n\n"
            
            except Exception as e:
                print(f"处理块异常：{str(e)}")
                yield f"data: {json.dumps({'content': str(chunk)})}\n\n"
        print(f"完整响应: {response_str}")
            
    except Exception as e:
        print(f"流式响应异常：{str(e)}")
        yield f"data: {json.dumps({'error': str(e)})}\n\n"


@app.route('/ask', methods=['POST'])
def ask():
    """POST /ask — stream the answer for the question in the JSON 'messages' field.

    Returns:
        A streaming text/event-stream Response on success, or a JSON
        error object with HTTP 400 when the question is missing.
    """
    # silent=True -> None on an absent/malformed JSON body instead of
    # raising; the old get_json() would make `data.get` blow up with an
    # AttributeError (HTTP 500) rather than the intended 400.
    data = request.get_json(silent=True) or {}
    question = data.get('messages', '')

    if not question:
        return {"status": "error", "message": "问题不能为空"}, 400

    return Response(generate_response(question), mimetype='text/event-stream')

if __name__ == "__main__":
    # Flask development server only; use a production WSGI server (e.g.
    # gunicorn/waitress) for real deployments.
    app.run(host='127.0.0.1', port=8002)