from fastapi import FastAPI, Request, UploadFile, File, HTTPException, Response
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
import yaml
from openai import OpenAI
from pydantic import BaseModel
from typing import Optional, Union
from paddleocr import PaddleOCR, PPStructure
import cv2
import numpy as np
import os
import io
from PIL import Image
import fitz  # PyMuPDF

app = FastAPI()

# Serve static assets and HTML templates
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# Load the application configuration once at import time.
# encoding="utf-8" is explicit: without it, open() uses the platform's
# locale encoding, and a config.yaml containing non-ASCII (e.g. Chinese
# comments or model names) fails to parse on Windows (cp936/gbk default).
with open('config.yaml', 'r', encoding='utf-8') as f:
    config = yaml.safe_load(f)

# Root route — site landing page
@app.get("/")
async def root(request: Request):
    """Render the home page."""
    context = {"request": request}
    return templates.TemplateResponse("root.html", context)

# Image OCR upload page
@app.get("/upload")
async def ocr_page(request: Request):
    """Render the image-OCR upload page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

# PDF text-extraction upload page
@app.get("/upload_pdf")
async def pdf_page(request: Request):
    """Render the PDF upload page."""
    context = {"request": request}
    return templates.TemplateResponse("index_pdf.html", context)

# Table-recognition upload page
@app.get("/upload_table")
async def table_page(request: Request):
    """Render the table-recognition upload page."""
    context = {"request": request}
    return templates.TemplateResponse("index_table.html", context)

# PPT-generation page
@app.get("/upload_ppt")
async def ppt_page(request: Request):
    """Render the PPT-generation page."""
    context = {"request": request}
    return templates.TemplateResponse("index_ppt.html", context)

# Text-to-image page
@app.get("/word2image")
async def word2image_page(request: Request):
    """Render the text-to-image page."""
    context = {"request": request}
    return templates.TemplateResponse("index_word2image.html", context)

# Text-optimization page
@app.get("/optimize")
async def optimize_page(request: Request):
    """Render the text-optimization page."""
    context = {"request": request}
    return templates.TemplateResponse("index_optimize.html", context)

# Tongyi Qianwen (Ali) API page
@app.get("/api_ali")
async def api_ali_page(request: Request):
    """Render the Tongyi Qianwen API page."""
    context = {"request": request}
    return templates.TemplateResponse("index_api_ali.html", context)

# Request model for the chat endpoints
class ChatRequest(BaseModel):
    # The user's current message
    message: str
    # Prior conversation turns; accepted from the client but currently
    # unused by /api/chat/deepseek (only `message`/`file_content` are sent)
    messages: list = []
    # Optional uploaded-document text to include as context
    file_content: Optional[str] = None

# Response model for the OCR endpoint.
# NOTE(review): declared but never wired up as a response_model —
# /api/ocr returns plain dicts of the same {"text", "confidence"} shape.
class OCRResponse(BaseModel):
    # Recognized text lines
    text: list
    # Per-line confidence scores, parallel to `text`
    confidence: list = []

@app.post("/api/ocr")
async def ocr_api(file: UploadFile = File(...)):
    """OCR an uploaded image, trying many preprocessing variants.

    Pipeline:
      1. Decode the upload with OpenCV and build up to 12 preprocessed
         variants (grayscale, binarized, inverted, sharpened, denoised,
         resized, ...), each also written to ``debug_images/`` for
         inspection.
      2. Run the module-level PaddleOCR instance on every variant and
         collect (text, confidence) pairs.
      3. If nothing was recognized, fall back in turn to PPStructure,
         a fresh PaddleOCR with a larger det_limit_side_len, and
         Tesseract (only if pytesseract is installed).

    Returns a dict {"text": [...], "confidence": [...]} with duplicate
    texts removed; errors are reported in-band, never as HTTP errors.
    """
    try:
        # Read the raw bytes of the upload
        content = await file.read()
        
        # Decode the bytes into an OpenCV BGR image
        nparr = np.frombuffer(content, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        # Bail out early if decoding failed (unsupported/corrupt format)
        if img is None:
            print("图像加载失败")
            return {"text": ["图像加载失败，请检查文件格式"], "confidence": [0.0]}
        
        # Log image geometry for debugging
        print(f"图像尺寸: {img.shape}")
        
        # Persist the original image for offline debugging
        debug_dir = "debug_images"
        os.makedirs(debug_dir, exist_ok=True)
        cv2.imwrite(os.path.join(debug_dir, "original.jpg"), img)
        
        # Preprocessed variants: list of (name, 3-channel image) pairs
        preprocessed_images = []
        
        # 1. Original image
        preprocessed_images.append(("original", img))
        
        # 2. Grayscale
        try:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(os.path.join(debug_dir, "gray.jpg"), gray)
            # PaddleOCR expects 3 channels, so convert back from grayscale
            gray_3channel = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("gray", gray_3channel))
        except Exception as e:
            print(f"灰度转换失败: {str(e)}")
        
        # 3. Adaptive (Gaussian) threshold binarization.
        # NOTE(review): `gray` from step 2 is used here and in steps 4-5
        # and 9-11; if step 2 failed, `gray` is undefined and each of
        # those steps fails with a NameError caught by its own except.
        try:
            binary = cv2.adaptiveThreshold(
                gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
                cv2.THRESH_BINARY, 11, 2
            )
            cv2.imwrite(os.path.join(debug_dir, "binary.jpg"), binary)
            # Back to 3 channels for PaddleOCR
            binary_3channel = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("binary", binary_3channel))
        except Exception as e:
            print(f"二值化失败: {str(e)}")
        
        # 4. Global (Otsu) threshold binarization
        try:
            _, global_binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            cv2.imwrite(os.path.join(debug_dir, "global_binary.jpg"), global_binary)
            # Back to 3 channels for PaddleOCR
            global_binary_3channel = cv2.cvtColor(global_binary, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("global_binary", global_binary_3channel))
        except Exception as e:
            print(f"全局二值化失败: {str(e)}")
        
        # 5. Inversion (dark-on-light text becomes light-on-dark)
        try:
            inverted = cv2.bitwise_not(gray)
            cv2.imwrite(os.path.join(debug_dir, "inverted.jpg"), inverted)
            # Back to 3 channels for PaddleOCR
            inverted_3channel = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("inverted", inverted_3channel))
        except Exception as e:
            print(f"反色失败: {str(e)}")
        
        # 6. Sharpening with a 3x3 kernel
        try:
            kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
            sharpened = cv2.filter2D(img, -1, kernel)
            cv2.imwrite(os.path.join(debug_dir, "sharpened.jpg"), sharpened)
            preprocessed_images.append(("sharpened", sharpened))
        except Exception as e:
            print(f"锐化失败: {str(e)}")
        
        # 7. Contrast / brightness adjustment
        try:
            alpha = 1.5  # contrast gain
            beta = 0     # brightness offset
            contrast_brightness = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
            cv2.imwrite(os.path.join(debug_dir, "contrast_brightness.jpg"), contrast_brightness)
            preprocessed_images.append(("contrast_brightness", contrast_brightness))
        except Exception as e:
            print(f"调整对比度和亮度失败: {str(e)}")
        
        # 8. Non-local-means denoising
        try:
            denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
            cv2.imwrite(os.path.join(debug_dir, "denoised.jpg"), denoised)
            preprocessed_images.append(("denoised", denoised))
        except Exception as e:
            print(f"去噪失败: {str(e)}")
        
        # 9. Edge detection (debug output only)
        try:
            edges = cv2.Canny(gray, 100, 200)
            cv2.imwrite(os.path.join(debug_dir, "edges.jpg"), edges)
            # Edge maps are not useful OCR input, so they are written for
            # debugging but not appended to preprocessed_images
        except Exception as e:
            print(f"边缘检测失败: {str(e)}")
        
        # 10. Morphological dilation (thickens strokes)
        try:
            kernel = np.ones((2, 2), np.uint8)
            dilated = cv2.dilate(binary, kernel, iterations=1)
            cv2.imwrite(os.path.join(debug_dir, "dilated.jpg"), dilated)
            # Back to 3 channels for PaddleOCR
            dilated_3channel = cv2.cvtColor(dilated, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("dilated", dilated_3channel))
        except Exception as e:
            print(f"膨胀操作失败: {str(e)}")
        
        # 11. Morphological erosion (thins strokes)
        try:
            kernel = np.ones((2, 2), np.uint8)
            eroded = cv2.erode(binary, kernel, iterations=1)
            cv2.imwrite(os.path.join(debug_dir, "eroded.jpg"), eroded)
            # Back to 3 channels for PaddleOCR
            eroded_3channel = cv2.cvtColor(eroded, cv2.COLOR_GRAY2BGR)
            preprocessed_images.append(("eroded", eroded_3channel))
        except Exception as e:
            print(f"腐蚀操作失败: {str(e)}")
        
        # 12. Upscaling
        try:
            height, width = img.shape[:2]
            scale_factor = 2.0  # double the resolution
            resized = cv2.resize(img, (int(width * scale_factor), int(height * scale_factor)), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(os.path.join(debug_dir, "resized.jpg"), resized)
            preprocessed_images.append(("resized", resized))
        except Exception as e:
            print(f"调整大小失败: {str(e)}")
        
        # Accumulated results across all variants
        all_recognized_text = []
        all_confidence_scores = []
        
        # Run OCR on each preprocessed variant in turn
        for name, processed_img in preprocessed_images:
            try:
                print(f"尝试使用 {name} 图像进行 OCR")
                
                # Call the module-level PaddleOCR instance directly;
                # cls=True enables text-orientation classification
                result = ocr(processed_img, cls=True)
                
                print(f"{name} OCR 结果类型: {type(result)}")
                
                # Parsed output for this variant
                recognized_text = []
                confidence_scores = []
                
                # The result shape differs between PaddleOCR code paths,
                # so both tuple and list forms are handled below
                if isinstance(result, tuple):
                    # Tuple form: presumably (result list, timing) — the
                    # first element holds the recognitions
                    print(f"{name} OCR 结果是元组，尝试提取第一个元素")
                    if len(result) > 0:
                        result_list = result[0]
                        print(f"{name} 提取的结果列表类型: {type(result_list)}")
                        
                        if isinstance(result_list, list):
                            for line in result_list:
                                if isinstance(line, list) and len(line) >= 2:
                                    # Usual per-line format: [box, [text, confidence]]
                                    text_conf = line[1]
                                    if isinstance(text_conf, list) and len(text_conf) >= 2:
                                        text, confidence = text_conf
                                        recognized_text.append(text)
                                        confidence_scores.append(float(confidence))
                                        print(f"{name} 从元组结果提取: 文本='{text}', 置信度={confidence}")
                elif isinstance(result, list):
                    # List form: iterate the lines directly
                    for idx, line in enumerate(result):
                        try:
                            print(f"{name} 处理第 {idx+1} 行结果")
                            
                            # Each line is expected to be a list
                            if not isinstance(line, list):
                                print(f"{name} 行 {idx+1} 不是列表，跳过")
                                continue
                            
                            # Extract text and confidence
                            if len(line) >= 2:
                                box, text_conf = line
                                
                                # text_conf may be a (text, conf) tuple or a bare string
                                print(f"{name} text_conf 类型: {type(text_conf)}")
                                
                                if isinstance(text_conf, tuple) and len(text_conf) >= 2:
                                    text, confidence = text_conf
                                    # Skip empty text
                                    if text and text.strip():
                                        recognized_text.append(text)
                                        confidence_scores.append(float(confidence))
                                        print(f"{name} 成功提取: 文本='{text}', 置信度={confidence}")
                                elif isinstance(text_conf, str):
                                    # Bare string: treat it as the text itself
                                    if text_conf and text_conf.strip():
                                        recognized_text.append(text_conf)
                                        confidence_scores.append(0.0)  # default confidence
                                        print(f"{name} 提取字符串: 文本='{text_conf}'")
                                else:
                                    print(f"{name} 无法解析 text_conf: {text_conf}")
                        except Exception as line_error:
                            print(f"{name} 处理行 {idx+1} 时出错: {str(line_error)}")
                
                # Accumulate whatever this variant recognized
                if recognized_text:
                    print(f"{name} 识别到 {len(recognized_text)} 个文本")
                    all_recognized_text.extend(recognized_text)
                    all_confidence_scores.extend(confidence_scores)
            except Exception as ocr_error:
                print(f"{name} OCR 处理出错: {str(ocr_error)}")
        
        # Fallback 1: PPStructure layout analysis if nothing was found
        if not all_recognized_text:
            print("所有预处理方法都未识别到文字，尝试使用 PPStructure")
            try:
                # Structured (layout) recognition on the original image
                structure_result = table_engine(img)
                
                # Pull text out of the structured items
                for item in structure_result:
                    try:
                        item_type = item.get('type', 'unknown')
                        print(f"处理结构化项目: 类型 = {item_type}")
                        
                        # Extract content from the various item types
                        if item_type == 'table':
                            # Direct text field, if present (0.9 is an
                            # arbitrary default confidence for this path)
                            if 'text' in item:
                                all_recognized_text.append(item['text'])
                                all_confidence_scores.append(0.9)
                                print(f"从表格提取到文本: {item['text']}")
                            
                            # Also strip text out of the rendered HTML
                            if 'html' in item:
                                from bs4 import BeautifulSoup
                                try:
                                    soup = BeautifulSoup(item['html'], 'html.parser')
                                    table_text = soup.get_text(separator=' ', strip=True)
                                    if table_text:
                                        all_recognized_text.append(table_text)
                                        all_confidence_scores.append(0.9)
                                        print(f"从表格 HTML 提取到文本: {table_text[:50]}...")
                                except Exception as bs_error:
                                    print(f"解析 HTML 出错: {str(bs_error)}")
                        elif item_type in ['text', 'reference', 'title', 'table_caption', 'footer']:
                            if 'text' in item:
                                all_recognized_text.append(item['text'])
                                all_confidence_scores.append(0.9)
                                print(f"从 {item_type} 提取到文本: {item['text']}")
                            elif 'res' in item and isinstance(item['res'], dict) and 'text' in item['res']:
                                all_recognized_text.append(item['res']['text'])
                                all_confidence_scores.append(0.9)
                                print(f"从 {item_type} 的 res 中提取到文本: {item['res']['text']}")
                    except Exception as item_error:
                        print(f"处理结构化项目时出错: {str(item_error)}")
            except Exception as structure_error:
                print(f"结构化识别失败: {str(structure_error)}")
        
        # Fallback 2: PaddleOCR with a larger det_limit_side_len
        if not all_recognized_text:
            print("尝试使用不同的 det_limit_side_len 参数")
            try:
                # Fresh PaddleOCR instance with a bigger detection side limit
                custom_ocr = PaddleOCR(use_angle_cls=True, lang="ch", det_limit_side_len=2240)
                result = custom_ocr(img)
                
                # Parse the tuple-shaped result as above
                if isinstance(result, tuple) and len(result) > 0:
                    result_list = result[0]
                    
                    if isinstance(result_list, list):
                        for line in result_list:
                            if isinstance(line, list) and len(line) >= 2:
                                text_conf = line[1]
                                if isinstance(text_conf, list) and len(text_conf) >= 2:
                                    text, confidence = text_conf
                                    if text and text.strip():
                                        all_recognized_text.append(text)
                                        all_confidence_scores.append(float(confidence))
                                        print(f"使用自定义参数提取: 文本='{text}', 置信度={confidence}")
            except Exception as custom_ocr_error:
                print(f"使用自定义参数出错: {str(custom_ocr_error)}")
        
        # Fallback 3: Tesseract OCR over the saved debug images
        if not all_recognized_text:
            print("尝试使用 Tesseract OCR")
            try:
                import pytesseract
                
                # Try every preprocessed image that was written to disk
                for name, img_path in [
                    ("原始图像", os.path.join(debug_dir, "original.jpg")),
                    ("灰度图像", os.path.join(debug_dir, "gray.jpg")),
                    ("二值图像", os.path.join(debug_dir, "binary.jpg")),
                    ("全局二值图像", os.path.join(debug_dir, "global_binary.jpg")),
                    ("反色图像", os.path.join(debug_dir, "inverted.jpg")),
                    ("锐化图像", os.path.join(debug_dir, "sharpened.jpg")),
                    ("对比度调整图像", os.path.join(debug_dir, "contrast_brightness.jpg")),
                    ("去噪图像", os.path.join(debug_dir, "denoised.jpg")),
                    ("膨胀图像", os.path.join(debug_dir, "dilated.jpg")),
                    ("腐蚀图像", os.path.join(debug_dir, "eroded.jpg")),
                    ("放大图像", os.path.join(debug_dir, "resized.jpg"))
                ]:
                    try:
                        # Skip variants whose preprocessing step failed
                        if os.path.exists(img_path):
                            # Simplified-Chinese + English models
                            text = pytesseract.image_to_string(img_path, lang='chi_sim+eng')
                            
                            # Keep each non-empty recognized line
                            if text and text.strip():
                                lines = text.strip().split('\n')
                                for line in lines:
                                    if line.strip():
                                        all_recognized_text.append(line.strip())
                                        all_confidence_scores.append(0.8)  # default confidence
                                        print(f"Tesseract 从 {name} 提取: 文本='{line.strip()}'")
                    except Exception as tesseract_error:
                        print(f"Tesseract 处理 {name} 出错: {str(tesseract_error)}")
            except ImportError:
                print("未安装 pytesseract，跳过 Tesseract OCR")
            except Exception as tesseract_error:
                print(f"Tesseract OCR 出错: {str(tesseract_error)}")
        
        # Still nothing: return a hint to the user
        if not all_recognized_text:
            all_recognized_text = ["未识别到文字，请尝试上传更清晰的图片"]
            all_confidence_scores = [0.0]
        
        # De-duplicate texts, keeping the first confidence seen for each
        unique_texts = []
        unique_confidences = []
        for text, conf in zip(all_recognized_text, all_confidence_scores):
            if text not in unique_texts:
                unique_texts.append(text)
                unique_confidences.append(conf)
        
        return {"text": unique_texts, "confidence": unique_confidences}
    except Exception as e:
        # Log the error with a full stack trace
        print(f"OCR 处理错误: {str(e)}")
        import traceback
        traceback.print_exc()  # print the full stack trace
        
        # Report the error in-band rather than as an HTTP error
        recognized_text = ["OCR 处理出错，请重试", "错误信息: " + str(e)]
        confidence_scores = [0.0, 0.0]
        return {"text": recognized_text, "confidence": confidence_scores}

# PDF text-extraction API endpoint
@app.post("/api/extract_pdf")
async def extract_pdf_api(file: UploadFile = File(...)):
    """Extract plain text from an uploaded PDF with PyMuPDF.

    Returns {"text": ...} containing one "--- 第 N 页 ---" section per
    page. On failure the error is reported in-band in the same shape,
    matching the other endpoints' error style.
    """
    try:
        # Read the raw bytes of the upload
        content = await file.read()

        extracted_text = ""

        # fitz.open(stream=...) accepts bytes directly; the previous
        # io.BytesIO(...).read() round-trip was a redundant extra copy.
        pdf_document = fitz.open(stream=content, filetype="pdf")
        try:
            # Walk the pages and collect their text with page headers
            for page_num in range(len(pdf_document)):
                page = pdf_document.load_page(page_num)
                page_text = page.get_text()
                extracted_text += f"--- 第 {page_num + 1} 页 ---\n{page_text}\n\n"
        finally:
            # Close the document even if a page fails to parse
            # (previously a mid-loop exception leaked the document).
            pdf_document.close()

        # Nothing extracted (e.g. a scanned/image-only PDF)
        if not extracted_text.strip():
            extracted_text = "PDF 文件中未提取到文字内容。"

        return {"text": extracted_text}
    except Exception as e:
        # Log and report the error in-band rather than raising a 500
        print(f"PDF 处理错误: {str(e)}")
        extracted_text = f"PDF 处理出错，请重试。错误信息: {str(e)}"
        return {"text": extracted_text}

# Initialize PaddleOCR (Chinese model, with text-direction classification).
# NOTE: these globals are defined after the route handlers above, which is
# fine — the whole module executes before the server starts serving, so
# the handlers resolve them at request time.
ocr = PaddleOCR(use_angle_cls=True, lang="ch")

# Initialize the PP-Structure engine for table/layout recognition
table_engine = PPStructure(show_log=False, table=True, ocr=True, lang="ch")

# Table-recognition API endpoint
@app.post("/api/recognize_table")
async def recognize_table_api(file: UploadFile = File(...)):
    """Recognize a table in an uploaded image.

    Tries, in order: PP-Structure table items (HTML first, then plain
    text), text mined from any other structured item, and finally plain
    PaddleOCR with one single-cell row per recognized line. Returns
    {"table": [[cell, ...], ...]}; errors are reported in-band as table
    rows rather than as HTTP errors.
    """
    try:
        # Read the raw bytes of the upload
        content = await file.read()
        
        # Decode the bytes into an OpenCV BGR image
        nparr = np.frombuffer(content, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        # Log image geometry for debugging.
        # NOTE(review): unlike /api/ocr there is no `img is None` guard
        # here — an undecodable upload raises on `.shape` and falls into
        # the generic error handler at the bottom.
        print(f"表格图像尺寸: {img.shape}")
        
        # Run PP-Structure table/layout recognition
        result = table_engine(img)
        
        # Log the raw result for debugging
        print(f"表格识别原始结果类型: {type(result)}")
        print(f"表格识别原始结果长度: {len(result) if isinstance(result, list) else 'not a list'}")
        
        # Rows extracted so far (list of lists of cell strings)
        table_data = []
        
        # Nothing detected at all
        if not result:
            print("未检测到任何结构")
            return {"table": [["未检测到任何结构，请尝试上传更清晰的图片"]]}
        
        # Walk the structured items looking for a table
        for idx, item in enumerate(result):
            try:
                item_type = item.get('type', 'unknown')
                print(f"处理项目 {idx+1}: 类型 = {item_type}")
                
                if item_type == 'table':
                    print("找到表格类型")
                    
                    # Preferred: parse the rendered HTML table
                    if 'html' in item:
                        print("从 HTML 中提取表格数据")
                        from bs4 import BeautifulSoup
                        try:
                            soup = BeautifulSoup(item['html'], 'html.parser')
                            rows = []
                            
                            # One row per <tr>
                            for tr in soup.find_all('tr'):
                                row = []
                                # One cell per <td>/<th>
                                for td in tr.find_all(['td', 'th']):
                                    cell_text = td.get_text(strip=True)
                                    row.append(cell_text)
                                if row:  # keep non-empty rows only
                                    rows.append(row)
                            
                            if rows:
                                print(f"从 HTML 提取到 {len(rows)} 行表格数据")
                                table_data = rows
                                break
                        except Exception as bs_error:
                            print(f"解析 HTML 出错: {str(bs_error)}")
                    
                    # No HTML or parsing failed: split the plain-text field
                    if not table_data and 'text' in item:
                        print("从 text 中提取表格数据")
                        text = item['text']
                        # One table row per text line
                        lines = text.strip().split('\n')
                        for line in lines:
                            # Cells split on any whitespace run
                            cells = line.split()
                            if cells:  # keep non-empty rows only
                                table_data.append(cells)
                        
                        if table_data:
                            print(f"从 text 提取到 {len(table_data)} 行表格数据")
                            break
                
                # A table caption seeds the table if nothing was found yet
                elif item_type == 'table_caption' and 'text' in item:
                    caption = item['text']
                    print(f"提取到表格标题: {caption}")
                    # Use the caption as the table's first row
                    if not table_data:
                        table_data = [[caption]]
            except Exception as item_error:
                print(f"处理项目 {idx+1} 时出错: {str(item_error)}")
        
        # No table item yielded rows: mine text from every item type
        if not table_data:
            print("未从表格中提取到数据，尝试从其他类型中提取")
            
            # Collect every text payload from the structured items
            all_texts = []
            for item in result:
                try:
                    if 'text' in item:
                        all_texts.append(item['text'])
                    elif 'res' in item and isinstance(item['res'], dict) and 'text' in item['res']:
                        all_texts.append(item['res']['text'])
                except:
                    pass
            
            # Build rows by splitting each text on lines and whitespace
            if all_texts:
                print(f"从所有项目中提取到 {len(all_texts)} 段文本")
                
                # One row per line, cells split on whitespace
                for text in all_texts:
                    lines = text.strip().split('\n')
                    for line in lines:
                        # Cells split on any whitespace run
                        cells = line.split()
                        if cells:  # keep non-empty rows only
                            table_data.append(cells)
        
        # Last resort: plain OCR, one single-cell row per recognized line
        if not table_data:
            print("尝试使用 OCR 直接识别")
            try:
                ocr_result = ocr(img)
                
                # Parse the tuple-shaped PaddleOCR result
                if isinstance(ocr_result, tuple) and len(ocr_result) > 0:
                    ocr_list = ocr_result[0]
                    
                    # Collect every recognized text string
                    ocr_texts = []
                    for line in ocr_list:
                        if isinstance(line, list) and len(line) >= 2:
                            text_conf = line[1]
                            if isinstance(text_conf, list) and len(text_conf) >= 1:
                                ocr_texts.append(text_conf[0])
                    
                    # One single-cell row per text
                    if ocr_texts:
                        print(f"OCR 识别到 {len(ocr_texts)} 段文本")
                        for text in ocr_texts:
                            table_data.append([text])
            except Exception as ocr_error:
                print(f"OCR 识别出错: {str(ocr_error)}")
        
        # Still nothing: return a hint to the user
        if not table_data:
            print("未能成功提取表格数据，返回提示信息")
            table_data = [
                ["未能识别出表格结构"],
                ["请尝试上传更清晰的图片，或确保图片中包含表格"]
            ]
        
        return {"table": table_data}
    except Exception as e:
        # Log the error with a full stack trace
        print(f"表格识别错误: {str(e)}")
        import traceback
        traceback.print_exc()  # print the full stack trace
        
        # Report the error in-band as table rows
        table_data = [
            ["表格识别出错"],
            [f"错误信息: {str(e)}"],
            ["请尝试上传其他图片"]
        ]
        return {"table": table_data}

# PPT slide model.
# NOTE(review): declared but not referenced by the endpoints below;
# /api/generate_ppt builds plain dicts of this same shape instead.
class PPTSlide(BaseModel):
    # Slide title
    title: str
    # Slide body text
    content: str
    # Keyword tags for the slide
    keywords: list = []

# Request model shared by /api/generate_ppt and /api/download_ppt
class PPTRequest(BaseModel):
    # Source text to turn into slides
    content: str
    # Presentation topic/title (optional)
    topic: str = ""

@app.post("/api/generate_ppt")
async def generate_ppt_api(request: PPTRequest):
    """Return slide data for the requested topic.

    Placeholder implementation: no AI model is wired up yet, so a fixed
    three-slide mock deck is returned.
    """
    try:
        # (title, content, keywords) for each mock slide
        slide_specs = [
            (f"{request.topic} - 介绍", "这是介绍幻灯片的内容。", ["介绍", "概述"]),
            ("主要内容", "这是主要内容幻灯片的内容。", ["内容", "详情"]),
            ("总结", "这是总结幻灯片的内容。", ["总结", "结论"]),
        ]
        slides = [
            {"title": title, "content": body, "keywords": tags}
            for title, body, tags in slide_specs
        ]
        return {"slides": slides}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"PPT 生成失败: {str(e)}")

# PPT download API endpoint
@app.post("/api/download_ppt")
async def download_ppt_api(request: PPTRequest):
    """Serve a downloadable "PPT" for the request.

    Placeholder implementation: the payload is plain text rather than a
    real .pptx file, but it is served with PowerPoint media type and a
    Content-Disposition header so the browser offers a download.
    """
    try:
        body = f"PPT 标题: {request.topic}\n\n内容:\n{request.content}"
        filename = request.topic or 'presentation'
        pptx_mime = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
        return Response(
            content=body.encode(),
            media_type=pptx_mime,
            headers={"Content-Disposition": f"attachment; filename={filename}.pptx"}
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"PPT 下载失败: {str(e)}")

# Text-to-image request model
class ImageRequest(BaseModel):
    # Text prompt describing the desired image
    prompt: str
    # Model identifier chosen by the client
    model: str
    # Image dimensions, presumably "WIDTHxHEIGHT" (inferred from the
    # 'x' -> '/' substitution in the endpoint) — confirm with the client
    size: str
    # Rendering style; defaults to "natural"
    style: str = "natural"

@app.post("/api/generate_image")
async def generate_image_api(request: ImageRequest):
    """Return a (placeholder) generated-image URL for the prompt.

    No real image-generation backend is wired up yet; the response is a
    via.placeholder.com URL sized from the requested dimensions.
    """
    try:
        # "512x512" -> "512/512" for the placeholder service's URL scheme
        size_path = "/".join(request.size.split('x'))
        return {"image_url": f"https://via.placeholder.com/{size_path}"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"图像生成失败: {str(e)}")

# Text-optimization request model
class OptimizeRequest(BaseModel):
    # Text to be rewritten/optimized
    text: str
    # Model identifier chosen by the client
    model: str
    # Target writing style; defaults to "formal"
    style: str = "formal"
    # Optional extra instruction for the rewrite
    instruction: str = ""

@app.post("/api/optimize")
async def optimize_api(request: OptimizeRequest):
    """Return "optimized" text for the request.

    Placeholder implementation: no AI model is wired up yet, so the
    response echoes the input plus the requested model/style/instruction.
    """
    try:
        segments = [
            f"这是优化后的文本。原始文本: {request.text}",
            f"使用模型: {request.model}\n风格: {request.style}",
        ]
        if request.instruction:
            segments.append(f"按照指令: {request.instruction}")
        return {"optimized_text": "\n\n".join(segments)}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文本优化失败: {str(e)}")

# Tongyi Qianwen (Ali) image request model
class AliImageRequest(BaseModel):
    # Text prompt for the image
    prompt: str
    # Model identifier
    model: str
    # Image dimensions, presumably "WIDTHxHEIGHT" (see endpoint) — confirm
    size: str

@app.post("/api/ali_image")
async def ali_image_api(request: AliImageRequest):
    """Placeholder for the Alibaba Tongyi Qianwen image API.

    No real API call is made yet; a via.placeholder.com URL matching the
    requested size is returned instead.
    """
    try:
        size_path = request.size.replace('x', '/')
        placeholder_url = f"https://via.placeholder.com/{size_path}"
        return {"image_url": placeholder_url}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"通义千问 API 调用失败: {str(e)}")

@app.get("/config/deepseek")
async def home(request: Request):
    """Render the DeepSeek model-configuration page."""
    context = {
        "request": request,
        "config": config,
        "current_model": config['models']['deepseek']['default_model'],
    }
    return templates.TemplateResponse("config_model.html", context)

@app.post("/api/chat/deepseek")
async def chat_deepseek(chat_request: ChatRequest):
    """Proxy a single chat message to the DeepSeek API and return the reply.

    If an uploaded document is attached, its text is embedded into the
    user turn as context. Errors are reported in-band as {"error": ...}.
    """
    deepseek_cfg = config['models']['deepseek']
    client = OpenAI(
        api_key=deepseek_cfg['api_key'],
        base_url=deepseek_cfg['base_url']
    )
    
    try:
        # Fold the document content (if any) into the user message
        if chat_request.file_content:
            user_content = (
                f"Here is the document content to provide context:\n\n"
                f"{chat_request.file_content}\n\n"
                f"Now, please respond to this question: {chat_request.message}"
            )
        else:
            user_content = chat_request.message
        
        messages = [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": user_content},
        ]
        
        response = client.chat.completions.create(
            model=deepseek_cfg['default_model'],
            messages=messages,
            stream=False
        )
        
        return {"response": response.choices[0].message.content}
    except Exception as e:
        return {"error": str(e)}

# DeepSeek chat page (confirmed reachable)
@app.get("/chat/deepseek")
async def deepseek_chat(request: Request):
    """Render the DeepSeek chat page."""
    context = {"request": request, "config": config}
    return templates.TemplateResponse("deepseekChat.html", context)

# File-upload endpoint supplying document context for the chat.
# NOTE(review): a bug report said "localhost:8000 shows file upload
# failed" — the server actually listens on port 8001 (see __main__
# below); verify the client is pointed at the right port.
@app.post("/api/upload")
async def upload_file(file: UploadFile = File(...)):
    """Read an uploaded text file and return its decoded content.

    Tries UTF-8 first, then GBK (common for Chinese Windows files).
    Raises 400 for missing/empty/undecodable files and 500 only for
    genuinely unexpected errors.
    """
    try:
        # Guard against a missing file
        if not file:
            raise HTTPException(status_code=400, detail="No file provided")
            
        # Read the raw bytes of the upload
        content = await file.read()
        
        # Try UTF-8 first
        try:
            text_content = content.decode('utf-8')
        except UnicodeDecodeError:
            # Fall back to GBK before giving up
            try:
                text_content = content.decode('gbk')
            except UnicodeDecodeError:
                raise HTTPException(status_code=400, detail="Unsupported file encoding")
        
        # Reject whitespace-only files
        if not text_content.strip():
            raise HTTPException(status_code=400, detail="File is empty")
            
        return {"file_content": text_content}
    except HTTPException:
        # Bug fix: previously the generic handler below caught the
        # deliberate 400 responses raised above and re-raised them as
        # 500 "Error processing file" — let them propagate unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing file: {str(e)}")

if __name__ == "__main__":
    # Local import so uvicorn is only required when run as a script
    import uvicorn
    # Listen on all interfaces; note the port is 8001, not the common 8000
    uvicorn.run(app, host="0.0.0.0", port=8001)