import torch
from transformers import AutoTokenizer, AutoModel
import numpy as np
import os
from datetime import datetime
import os
from docx import Document
import fitz  # PyMuPDF
import time
import faiss
from faiss import normalize_L2
import glob
import gradio as gr
import shutil
import requests
import json
import PyPDF2
from docx import Document
import docx2txt
from functools import partial
import warnings
from urllib3.exceptions import InsecureRequestWarning
import uvicorn
from fastapi import FastAPI, Query, Path, Body
from fastapi.responses import JSONResponse
# 忽略 InsecureRequestWarning
warnings.filterwarnings("ignore", category=InsecureRequestWarning)


print('module are imported')

# Load the local embedding model and its tokenizer.
model_name = "./model/tao8k"  # path to the local model directory
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Run the model on CPU (switch to "cuda" for GPU inference).
device = torch.device("cpu")
model.to(device)
# Departments: each one gets a chat tab plus db/<dept> and static/<dept> dirs.
depts=['IT','UPP','Security','质量','通用']
# URL of the LLM chat-completions API.
# NOTE(review): query_model() below assigns its own URL, so this value is unused there.
MODEL_API_URL = "http://192.168.198.227:8000/v1/chat/completions"
# SECURITY(review): hard-coded API key — should be moved to environment/config.
API_KEY = "app-vAC1Zc3QOkFpuBw1Uw3Bd7q5"
company='XX中国'

fastapi_app = FastAPI()

# Original route: root "chat" endpoint.
@fastapi_app.get("/chat")
def read_main():
    """Liveness-style endpoint confirming the main app responds."""
    payload = {"message": "This is your main app"}
    return payload

# Route returning static application metadata.
@fastapi_app.get("/about")
def about():
    """Return the application's name and version."""
    info = {"app_name": "FastAPI with Gradio", "version": "1.0"}
    return info

# Route demonstrating a required query parameter.
@fastapi_app.get("/items/")
def read_item(item_id: int = Query(..., title="Item ID", description="The ID of the item to get")):
    """Echo back the ``item_id`` query parameter."""
    response = {"item_id": item_id}
    return response

# Route demonstrating a path parameter.
@fastapi_app.get("/users/{user_id}")
def read_user(user_id: int = Path(..., title="User ID", description="The ID of the user to get")):
    """Echo back the ``user_id`` path parameter."""
    response = {"user_id": user_id}
    return response

# Route demonstrating a JSON request body.
@fastapi_app.post("/post-data/")
def post_data(data: dict = Body(..., title="Data", description="Data to be processed")):
    """Echo back the posted JSON body."""
    response = {"received_data": data}
    return response

@fastapi_app.post("/retrieval")
def retrieval(request: dict):
    """External retrieval endpoint (Dify-style).

    Expects a JSON body with a "query" field, searches the shared "通用"
    knowledge base and returns up to 2 records whose similarity score
    passes the threshold.

    :param request: parsed JSON body; only the "query" key is used
    :return: JSONResponse with a "records" list (possibly empty)
    """
    # A missing "query" used to raise KeyError (HTTP 500); answer cleanly.
    query_text = request.get("query")
    if not query_text:
        return JSONResponse(content={"records": []})

    score_threshold = 0.5  # minimum similarity for a record to be returned
    top_k = 2              # number of records returned to the caller

    knowledge_records = api(query_text, "./db/通用/", top_k)

    # api() returns a plain string ("未找到相关文档:...") when nothing relevant
    # is found; iterating that string would crash on record["score"].
    if isinstance(knowledge_records, str):
        return JSONResponse(content={"records": []})

    filtered_records = [
        record for record in knowledge_records
        if record["score"] >= score_threshold
    ]
    return JSONResponse(content={"records": filtered_records[:top_k]})


def split_text_into_paragraphs(file_path):
    """Read a document and split its text into paragraph chunks.

    Supported file types: .docx, .pdf, .txt.

    The text is whitespace-normalized and greedily packed into chunks of
    at most 600 characters; chunks shorter than 400 characters are
    discarded, including a short trailing remainder.
    NOTE(review): splitting is whitespace-based, so continuous CJK text
    without spaces may end up as one oversized "word" — confirm inputs
    contain separators.

    Args:
        file_path: Path to the document.

    Returns:
        list[str]: The extracted text chunks.

    Raises:
        FileNotFoundError: If *file_path* does not exist.
        ValueError: For unsupported file extensions.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"文件不存在，请检查路径：{file_path}")

    _, file_extension = os.path.splitext(file_path)
    file_extension = file_extension.lower()

    if file_extension == ".docx":
        # docx2txt extracts all the text; the previously constructed
        # python-docx Document object was never used and has been removed.
        text = docx2txt.process(file_path)
    elif file_extension == ".pdf":
        doc = fitz.open(file_path)
        text = ""
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            text += page.get_text() + "\n"
        doc.close()  # release the PDF handle (was leaked before)
    elif file_extension == ".txt":
        with open(file_path, "r", encoding="utf-8") as f:
            text = f.read()
    else:
        raise ValueError(f"不支持的文件类型：{file_extension}")

    # Collapse all whitespace (newlines, tabs, repeats) to single spaces.
    text = " ".join(text.split())

    # Greedily pack words into chunks of at most 600 characters.
    paragraphs = []
    current_paragraph = ""
    for word in text.split():
        if len(current_paragraph) + len(word) + 1 <= 600:
            current_paragraph += word + " "
        else:
            # Flush the full chunk; drop fragments under 400 characters.
            if len(current_paragraph) >= 400:
                paragraphs.append(current_paragraph.strip())
            current_paragraph = word + " "

    # Flush the final chunk under the same minimum-length rule.
    if len(current_paragraph) >= 400:
        paragraphs.append(current_paragraph.strip())

    return paragraphs


def format_timestamp(timestamp=None, format_str="%Y-%m-%d_%H%M%S"):
    """Format a timestamp as a date/time string.

    Args:
        timestamp (float, optional): Seconds since the epoch; the current
            local time is used when omitted.
        format_str (str, optional): strftime pattern; defaults to
            "%Y-%m-%d_%H%M%S" (filesystem-safe, no spaces or colons).

    Returns:
        str: The formatted date/time string.
    """
    moment = (
        datetime.now()
        if timestamp is None
        else datetime.fromtimestamp(timestamp)
    )
    return moment.strftime(format_str)
# Text vectorization: embed a string with the locally loaded HF model.
# NOTE(review): this function is redefined later in the file; at runtime
# the later (SentenceTransformer-based) definition wins.
def get_text_embedding(text):
    """Return a mean-pooled last-hidden-state embedding as a NumPy array."""
    encoded = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    ).to(device)
    with torch.no_grad():
        hidden_states = model(**encoded).last_hidden_state
    # Mean over the sequence dimension gives one vector per input text.
    return hidden_states.mean(dim=1).cpu().numpy()

# Persist an embedding vector to disk in NumPy's .npy format.
def save_embedding(embedding, file_path):
    """Write *embedding* to *file_path* as a .npy file."""
    np.save(file_path, embedding)

# Helper: embed one text chunk and persist both the vector and the raw text.
def text_to_embedding_file(text, filename, radio_button, save_dir="db/"):
    """Embed *text* and save the vector (.npy) plus the raw text (.txt).

    Args:
        text: The text chunk to embed.
        filename: Original document name, used as the file-name prefix.
        radio_button: Department name; selects the sub-directory.
        save_dir: Root directory of the vector store.

    Returns:
        str: Path of the saved .npy file.
    """
    # Ensure the department sub-directory itself exists: the original code
    # only created *save_dir*, which crashed for a brand-new department.
    target_dir = save_dir + radio_button
    os.makedirs(target_dir, exist_ok=True)

    embedding = get_text_embedding(text)

    # File name = source document name + timestamp (not a hash, despite the
    # old comment), keeping chunks of the same document distinguishable.
    file_name = "{}_{}.npy".format(filename, format_timestamp())
    file_path = os.path.join(target_dir, file_name)
    save_embedding(embedding, file_path)

    # Store the raw text next to the vector so a search hit can be mapped
    # back to its content by swapping the extension.
    txt_file = os.path.join(target_dir, file_name.replace('.npy', '.txt'))
    with open(txt_file, "w", encoding="utf-8") as f:
        f.write(text)

    # Timestamp resolution is 1 second; the pause only reduces (does not
    # eliminate) the chance of two chunks colliding on the same name.
    time.sleep(0.1)
    print(f"嵌入向量已保存到 {file_path}")
    return file_path

# Read a previously saved embedding vector back from disk.
def load_embedding(file_path):
    """Load and return the .npy embedding stored at *file_path*."""
    print('file_path', file_path)  # debug trace kept for log parity
    return np.load(file_path)

# 示例用法
# 示例用法



def compute_similarity(query_embedding, embeddings_index, top_n=3):
    """
    Search a FAISS index for the vectors most similar to the query.

    :param query_embedding: query vector, shape [1, D], dtype float32
    :param embeddings_index: FAISS index built over L2-normalized vectors
    :param top_n: number of results to return
    :return: (indices, similarities) of the top_n most similar vectors
    :raises ValueError: if the query has the wrong shape or dtype
    """
    # Validate with real exceptions: assert is stripped under `python -O`.
    if query_embedding.ndim != 2 or query_embedding.shape[0] != 1:
        raise ValueError("查询向量的形状应为 [1, D]")
    if query_embedding.dtype != np.float32:
        raise ValueError("查询向量的数据类型应为 float32")

    # faiss.normalize_L2 works in place; normalize a copy so the caller's
    # array is not silently mutated.
    query = query_embedding.copy()
    normalize_L2(query)

    # Inner product over unit vectors equals cosine similarity.
    D, I = embeddings_index.search(query, top_n)
    return I[0], D[0]

def load_index(embeddings_dir):
    """Build a FAISS inner-product index from all .npy vectors in a directory.

    Vectors are L2-normalized first, so inner-product search equals cosine
    similarity.

    NOTE(review): callers re-glob the same directory to map result indices
    back to file paths, so this relies on glob returning files in the same
    order both times — fragile if files are added in between; verify.

    :param embeddings_dir: directory containing the .npy embedding files
    :return: a populated faiss.IndexFlatIP
    :raises ValueError: if the directory contains no .npy files
    """
    embeddings_paths = glob.glob(os.path.join(embeddings_dir, "*.npy"))
    if not embeddings_paths:
        # np.vstack([]) raises an opaque error; fail with useful context.
        raise ValueError(f"没有找到嵌入向量文件：{embeddings_dir}")

    # Stack all vectors into one [N, D] float32 matrix.
    embeddings = np.vstack([np.load(path) for path in embeddings_paths])
    embeddings = embeddings.astype(np.float32)

    # In-place L2 normalization so inner product == cosine similarity.
    normalize_L2(embeddings)

    index = faiss.IndexFlatIP(embeddings.shape[1])
    index.add(embeddings)
    return index

def api(query_text,embeddings_dir="./db/通用",top_n=2):
    """Retrieve the top-2 knowledge records for *query_text* (backend of /retrieval).

    Returns a list of two record dicts (metadata/score/title/content), or the
    plain string '未找到相关文档:<query>' when the best similarity is <= 0.57.

    NOTE(review): always searches the startup index ``index_default`` even
    though *embeddings_dir* is a parameter — only the path glob uses the
    directory. Result indices are mapped back through a fresh glob, which
    assumes the same file order as when the index was built; verify.
    NOTE(review): assumes at least two indexed files exist; otherwise
    top_indices[1] raises IndexError.
    """
    query_embedding = get_text_embedding(query_text).reshape(1, -1).astype(np.float32)
    # Module-level FAISS index built over db/通用 at startup.
    index=index_default
    # "./db/通用/" -> department name "通用".
    dept=embeddings_dir.split('/')[2]
    # Positions in this list correspond to FAISS result indices.
    embeddings_paths = glob.glob(os.path.join(embeddings_dir, "*.npy"))
    top_indices, similarities = compute_similarity(query_embedding, index, top_n)
    # Each .npy vector has a sibling .txt file holding the chunk text.
    file=embeddings_paths[top_indices[0]].replace('.npy','.txt').replace('\\','/')
    file1=embeddings_paths[top_indices[1]].replace('.npy','.txt').replace('\\','/')
    # NOTE(review): the path variables are shadowed by the open file handles
    # below, so get_filename(str(file)) later parses the closed handle's repr
    # and only yields the right name by accident of that repr's formatting.
    with open(file, "r", encoding="utf-8") as file:  # read-only
        content = file.read()
    with open(file1, "r", encoding="utf-8") as file1:  # read-only
        content1 = file1.read()
    # Similarity gate: below 0.57 the best hit is treated as irrelevant.
    if similarities[0]<=0.57:
        return '未找到相关文档:'+query_text
    filename0=get_filename(str(file))
    filename1=get_filename(str(file1))
    dict_response={}
    dict_response['question']=query_text
    dict_response['dept']=dept
    dict_response['answer0_filename']=filename0
    dict_response['answer1_filename']=filename1
    dict_response['answer0_content']=content
    dict_response['answer1_content']=content1
    # Markdown links to the original uploaded documents served from static/.
    dict_response['answer0_link']=' ## <a href="/gradio_api/file=static/{}/{}" target="_blank">'.format(dept,filename0)
    dict_response['answer1_link']=' ## <a href="/gradio_api/file=static/{}/{}" target="_blank">'.format(dept,filename1)
    # Shape the two hits as retrieval records for the API response.
    mock_knowledge_base = [
        {
            "metadata": {
                "path": dict_response['answer0_link'],
                "description": filename0
            },
            "score": float(similarities[0]),
            "title": filename0,
            "content": dict_response['answer0_content']
        },
        {
            "metadata": {
                "path": dict_response['answer1_link'],
                "description": filename1
            },
            "score": float(similarities[1]),
            "title": filename1,
            "content": dict_response['answer1_content']
        }
    ]
    return mock_knowledge_base
    
def query(query_text, embeddings_dir, top_n=2):
    """Find the two knowledge chunks most similar to *query_text*.

    :param query_text: the user question
    :param embeddings_dir: department vector store, e.g. "./db/IT"
    :param top_n: neighbours to request from FAISS (must be >= 2, since
        two hits are always read back)
    :return: dict with question/dept/filenames/contents/links, or the
        string '未找到相关文档:<question>' when the best score is <= 0.57
    """
    print(0)  # legacy progress trace, kept for log parity
    # "./db/IT" -> "IT"
    dept = embeddings_dir.split('/')[2]
    query_embedding = get_text_embedding(query_text).reshape(1, -1).astype(np.float32)
    # Must use the same pattern (and glob order) as index construction,
    # because FAISS result indices are positions in this list.
    embeddings_paths = glob.glob(os.path.join(embeddings_dir, "*.npy"))
    if dept == '通用':
        # The shared knowledge base is indexed once at startup; reuse it.
        index = index_default
    else:
        # Departmental stores are rebuilt per query. This call replaces a
        # verbatim inline copy of load_index() that used to live here.
        index = load_index(embeddings_dir)

    top_indices, similarities = compute_similarity(query_embedding, index, top_n)

    # Map the two best hits back to their sibling .txt content files.
    path0 = embeddings_paths[top_indices[0]].replace('.npy', '.txt').replace('\\', '/')
    path1 = embeddings_paths[top_indices[1]].replace('.npy', '.txt').replace('\\', '/')
    with open(path0, "r", encoding="utf-8") as f:
        content = f.read()
    with open(path1, "r", encoding="utf-8") as f:
        content1 = f.read()

    # Similarity gate: below 0.57 the best hit is treated as irrelevant.
    if similarities[0] <= 0.57:
        return '未找到相关文档:' + query_text

    # Pass the path strings directly: the original passed str(<closed file
    # handle>) due to variable shadowing and only worked by accident of the
    # handle's repr formatting.
    filename0 = get_filename(path0)
    filename1 = get_filename(path1)

    dict_response = {
        'question': query_text,
        'dept': dept,
        'answer0_filename': filename0,
        'answer1_filename': filename1,
        'answer0_content': content,
        'answer1_content': content1,
        # Markdown links to the uploaded originals served from static/.
        'answer0_link': ' ## <a href="/gradio_api/file=static/{}/{}" target="_blank">'.format(dept, filename0),
        'answer1_link': ' ## <a href="/gradio_api/file=static/{}/{}" target="_blank">'.format(dept, filename1),
    }
    return dict_response



# Text-embedding helper based on Sentence Transformers.
# NOTE(review): this redefinition shadows the AutoModel-based
# get_text_embedding defined earlier in the file; at runtime this one wins.
def get_text_embedding(text):
    """Return the embedding of *text* as a float32 array of shape [1, D]."""
    from sentence_transformers import SentenceTransformer
    # Load the model once and cache it on the function object: the original
    # reloaded the model on every single call, which is extremely slow.
    st_model = getattr(get_text_embedding, "_model", None)
    if st_model is None:
        st_model = SentenceTransformer(model_name)  # local model path
        get_text_embedding._model = st_model
    embedding = st_model.encode([text], show_progress_bar=False)
    return embedding.reshape(1, -1).astype(np.float32)

# Main entry for the upload tab: save files into static/ and index them.
def process_files(files, radio_button):
    """Save uploaded documents and add their chunks to the vector store.

    :param files: Gradio file objects (.txt/.docx/.pdf)
    :param radio_button: selected department name
    :return: status message shown in the UI
    """
    global index_default
    if not radio_button:
        return "请先选择一个部门！"
    ensure_directory_exists('static/' + radio_button)
    ensure_directory_exists('db/' + radio_button)

    saved_files = []
    for file in files:
        # Base name only — never the temp-dir path Gradio hands us.
        file_name = os.path.basename(file.name)
        file_extension = os.path.splitext(file_name)[1].lower()
        if file_extension not in [".txt", ".docx", ".pdf"]:
            return f"文件 {file_name} 不支持的文件类型。仅支持 .txt, .docx 和 .pdf 文件。"

        temp_file_path = file.name
        save_path = os.path.join("static/" + radio_button, file_name)

        # Avoid copying a file onto itself when source and target coincide.
        if os.path.abspath(temp_file_path) != os.path.abspath(save_path):
            shutil.copy(temp_file_path, save_path)
            saved_files.append(save_path)
        else:
            saved_files.append(f"文件 {file_name} 已存在于目标目录，跳过保存。")

        # Chunk the saved document and persist one embedding per chunk.
        base_name = os.path.basename(save_path)
        for paragraph in split_text_into_paragraphs(save_path):
            text_to_embedding_file(paragraph, base_name, radio_button)

    # Only the shared "通用" store backs the startup index. The original
    # rebuilt db/通用 after *any* department's upload, which crashed when
    # db/通用 was still empty and was a no-op for other departments anyway.
    if radio_button == '通用':
        index_default = load_index('db/通用')
    return f"文件已成功保存到目录：{saved_files}"

# Call the remote LLM (Dify chat-messages endpoint) with a streamed request.
def query_model(prompt):
    """POST *prompt* to the chat API and return the streaming response.

    :param prompt: full prompt text (question plus retrieved context)
    :return: the ``requests.Response`` on success, or an ``"Error: ..."``
        string on any request failure
    """
    # Only the chat-messages endpoint is used; the two earlier URL
    # assignments and the unused ``messages`` list were dead code.
    MODEL_API_URL = "https://192.168.198.137/v1/chat-messages"
    # SECURITY(review): credentials are hard-coded and TLS verification is
    # disabled below — move the key to configuration and trust the CA.
    API_KEY = "app-vAC1Zc3QOkFpuBw1Uw3Bd7q5"
    APP_ID = "37718c73-09ad-410f-b23d-0a5915394e8c"
    USER_ID = "your_user_id_here"

    # Mixed Dify ("inputs"/"query"/"response_mode") and OpenAI-style fields;
    # the server ignores the fields it does not know.
    payload = {
        "inputs": {},
        "model": "deepseek-r1:70b",
        "stream": True,
        "max_tokens": 768,
        "temperature": 0.3,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
        "response_mode": "streaming",
        "num_thread": 4,
        "app_id": APP_ID,
        "user": USER_ID,
        "query": prompt,
    }

    headers = {
        "Authorization": f"Bearer { API_KEY }",
        "Content-Type": "application/json"
    }
    try:
        response = requests.post(
            MODEL_API_URL, json=payload, headers=headers,
            stream=True, verify=False, timeout=1000,
        )
        response.raise_for_status()  # surface HTTP-level failures
        return response
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"

# Gradio handler: retrieve context, show it, then stream the LLM answer.
def gradio_interface(prompt, dept):
    """Generator driving the chat tabs.

    Yields progressively longer markdown: first the retrieved context with
    source links, then the context plus the streamed model answer.

    :param prompt: user question
    :param dept: department whose knowledge base to search
    """
    dict_response = query(prompt, './db/' + dept, top_n=2)
    if isinstance(dict_response, str):
        # No relevant document: send the bare question to the model.
        prompt0 = prompt
        prompt1 = prompt
    else:
        # Markdown block with links to the two source documents.
        prompt1 = (' ### 问题:' + dict_response['question'] + '<br>'
                   + dict_response['answer0_link'] + dict_response['answer0_filename'] + '</a><br>'
                   + dict_response['answer0_content'] + '<br>'
                   + dict_response['answer1_link'] + dict_response['answer1_filename'] + '</a><br>'
                   + dict_response['answer1_content'] + '</a><br>')
        # Plain-text prompt for the model: question + retrieved passages.
        prompt0 = ('问题: ' + dict_response['question'] + ' 资料：'
                   + dict_response['answer0_content'] + ' '
                   + dict_response['answer1_content'].replace('.', ''))

    yield prompt1 + "<br>*****************************请等待AI助手分析******************************************************</br>"

    response = query_model(prompt0)
    if isinstance(response, str):
        # BUGFIX: the original did ``return response`` here, but a return
        # value from a generator never reaches Gradio — the user hung on
        # the "please wait" banner. Yield the error so it is visible.
        yield prompt1 + '<br>' + response
        return

    # Accumulate the streamed answer and re-yield the growing text.
    full_response = prompt1 + '<br>'
    for line in response.iter_lines():
        if not line:
            continue
        decoded_line = line.decode("utf-8")
        if decoded_line.startswith("data: "):
            try:
                data = json.loads(decoded_line[6:])  # strip "data: " prefix
            except json.JSONDecodeError:
                # Keep-alives / partial frames: skip (was a bare except).
                continue
            # Dify streams {"answer": ...}; OpenAI-style uses choices/delta.
            if "answer" in data:
                full_response += data['answer']
            elif "choices" in data and data["choices"]:
                delta = data["choices"][0].get("delta", {})
                full_response += delta.get("content", "")
            yield full_response
        else:
            yield full_response
    return full_response

### 定义切换到页面B的函数
##def switch_to_page_b():
##    return gr.update(visible=False), gr.update(visible=True)
##
##
### 定义切换到页面A的函数
##def switch_to_page_a():
##    return gr.update(visible=True), gr.update(visible=False)






# Build the Gradio UI: a main page with one chat tab per department and an
# upload tab, plus a hidden legacy upload page (page_b).
# NOTE(review): input_text/output_text/process_text_with_dept are rebound in
# every Tab context; each submit() captures the components built in its tab.
with gr.Blocks(title="智能AI知识库") as demo:

    with gr.Column(visible=True) as page_a:
        gr.Markdown("# 中国智能AI知识库")
        #input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # 设置为单行输入
        #output_text = gr.Textbox(lines=10, placeholder="Model response will appear here...")
        #output_text=gr.Markdown()

        # (disabled) earlier single-page wiring
        #input_text.submit(gradio_interface, inputs=input_text, outputs=output_text)
        #button_to_b = gr.Button("上传知识文档docx,pdf,txt")
        with gr.Tab("通用"):

            input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # single-line input; Enter submits

            output_text=gr.Markdown()

            # Fix the department argument for this tab.
            process_text_with_dept = partial(gradio_interface, dept="通用")

            # Submit the question on Enter.
            input_text.submit(process_text_with_dept, inputs=[input_text], outputs=output_text)
        with gr.Tab("IT"):

            input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # single-line input; Enter submits

            output_text=gr.Markdown()

            process_text_with_dept = partial(gradio_interface, dept="IT")

            # Submit the question on Enter.
            input_text.submit(process_text_with_dept, inputs=[input_text], outputs=output_text)

        with gr.Tab("质量"):

            input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # single-line input; Enter submits

            output_text=gr.Markdown()

            process_text_with_dept = partial(gradio_interface, dept="质量")

            # Submit the question on Enter.
            input_text.submit(process_text_with_dept, inputs=[input_text], outputs=output_text)

        with gr.Tab("UPP"):

            input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # single-line input; Enter submits

            output_text=gr.Markdown()

            process_text_with_dept = partial(gradio_interface, dept="UPP")

            # Submit the question on Enter.
            input_text.submit(process_text_with_dept, inputs=[input_text], outputs=output_text)

        with gr.Tab("Security"):

            input_text = gr.Textbox(lines=1, placeholder="直接敲回车键")  # single-line input; Enter submits

            output_text=gr.Markdown()

            process_text_with_dept = partial(gradio_interface, dept="Security")

            # Submit the question on Enter.
            input_text.submit(process_text_with_dept, inputs=[input_text], outputs=output_text)
        with gr.Tab("上传文件"):
            gr.Markdown("欢迎来到页面B！")
            gr.Markdown("这是一个简单的页面B。")

            # Department selector for uploads; defaults to the shared store.
            radio_button = gr.Radio(choices=depts, label="部门选择",value='通用')

            # Upload form: saves the files and indexes their chunks.
            iface = gr.Interface(
                fn=process_files,
                inputs=[
                gr.Files(label="上传文件", file_types=[".docx", ".txt", ".pdf"]),
                radio_button  # pass the Radio component as the second input
                ],
                outputs="text",  # shows the save-result message
                title="文件上传工具",
                description="上传文件后，文件将被保存到服务器的 static 目录下。"
            )



        # Legacy upload page, kept hidden (page switching is disabled below).
    with gr.Column(visible=False) as page_b:
        gr.Markdown("欢迎来到页面B！")
        gr.Markdown("这是一个简单的页面B。")

        # Department selector.
        radio_button = gr.Radio(choices=depts, label="部门选择")

        # Upload form: saves the files and indexes their chunks.
        iface = gr.Interface(
            fn=process_files,
            inputs=[
            gr.Files(label="上传文件", file_types=[".docx", ".txt", ".pdf"]),
            radio_button  # pass the Radio component as the second input
            ],
            outputs="text",  # shows the save-result message
            title="文件上传工具",
            description="上传文件后，文件将被保存到服务器的 static 目录下。"
        )

        button_to_a = gr.Button("返回知识库查询")




    # (disabled) page-switch button wiring
    #button_to_b.click(switch_to_page_b, outputs=[page_a, page_b])
    #button_to_a.click(switch_to_page_a, outputs=[page_a, page_b])



def get_filename(fileext):
    """Recover the original document name from a chunk-file path.

    Chunk files are named "<original>_<timestamp>.<ext>": strip the
    directory and the extension, then keep everything before the first
    underscore.
    """
    base = os.path.basename(fileext)
    stem = os.path.splitext(base)[0]
    return stem.split("_")[0]

def ensure_directory_exists(directory_path):
    """
    Ensure *directory_path* exists, creating it if necessary.

    Args:
        directory_path (str): directory path to check or create.

    Returns:
        str: the same *directory_path*.
    """
    # Remember the prior state only for the log message; makedirs with
    # exist_ok=True is race-free, whereas the original check-then-create
    # could crash if the directory appeared between the two calls.
    already_there = os.path.exists(directory_path)
    os.makedirs(directory_path, exist_ok=True)
    if already_there:
        print(f"目录已存在: {directory_path}")
    else:
        print(f"已创建目录: {directory_path}")
    return directory_path


# Script entry point: prepare data directories, build the startup FAISS
# index over the shared knowledge base, then serve Gradio mounted on FastAPI.
if __name__ == "__main__":


    print(0)  # startup trace
    # Root directories for embedding vectors and uploaded originals.
    ensure_directory_exists('db')
    ensure_directory_exists('static')
    print(0.1)
    # Module-level index over db/通用, shared by query() and api().
    # NOTE(review): load_index fails if db/通用 has no .npy files yet —
    # a fresh deployment needs at least one indexed document; verify.
    index_default=load_index('db/通用')
    print(0.2)
    #demo.launch(server_name="0.0.0.0", server_port=7860,allowed_paths=['static'])

    # Serve the Gradio UI at "/" on top of the FastAPI routes.
    app = gr.mount_gradio_app(fastapi_app, demo, path="/")
    uvicorn.run(fastapi_app, host="0.0.0.0",port=7860)

