import asyncio
import base64
import io
import json
import os
import time
from io import BytesIO
from typing import List, Annotated, Optional

import cv2
import fitz
import numpy as np
import requests
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from openai import OpenAI
from paddleocr import PaddleOCR, PPStructure
from PIL import Image
from pptx import Presentation
from pptx.util import Inches, Pt
from pydantic import BaseModel

from config import OPENAI_API_KEY, OPENAI_BASE_URL, AVAILABLE_MODELS

# Create the FastAPI application instance
app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins; restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)

# Serve static assets (HTML pages below are read from this directory)
app.mount("/static", StaticFiles(directory="static"), name="static")

# Initialize PaddleOCR (Chinese model, with text-angle classification enabled)
ocr = PaddleOCR(use_angle_cls=True, lang="ch")

# Initialize the table-structure recognition engine
# NOTE(review): table_engine is created here but the /ocr_table/ endpoint
# uses plain `ocr` instead — confirm whether this engine is still needed.
table_engine = PPStructure(show_log=False, table=True, ocr=True, lang="ch")

# --- Pydantic data models ---
class OCRResponse(BaseModel):
    """Response for the OCR endpoints: parallel lists of recognized text
    lines and their confidence scores (same index = same detected line)."""
    text: List[str]  # recognized text, one entry per detected line
    confidence: List[float]  # confidence score matching each text entry

class OptimizeRequest(BaseModel):
    """Request payload for POST /optimize."""
    text: str  # raw text to be polished
    model: str  # chat model id (one of AVAILABLE_MODELS)
    # Bug fix: the default instruction was garbled — "优化和完以下" was
    # missing the character 善 ("优化和完善" = "optimize and improve").
    instruction: str = "请帮助优化和完善以下文本，使其更加通顺、准确。"

class PPTAnalyzeRequest(BaseModel):
    """Request payload for POST /analyze_ppt/: the source text to outline."""
    text: str  # free-form text to convert into a slide outline

class ImageGenerationRequest(BaseModel):
    """Request payload for POST /generate_image/."""
    prompt: str  # natural-language description of the desired image
    model: str = "dall-e-3"  # image model id; see AVAILABLE_IMAGE_MODELS

class SlideContent(BaseModel):
    """Content specification for a single slide in a generated deck."""
    title: str  # slide title
    content: str  # condensed body text
    keywords: List[str]  # extracted keywords shown on the slide
    image_suggestion: str  # textual suggestion for an illustration
    image_url: Optional[str] = None  # optional URL of a pre-generated image to embed

class PPTContent(BaseModel):
    """Request payload for POST /generate_ppt/: the full deck to render."""
    slides: List[SlideContent]  # one entry per slide, in presentation order

# Catalog of supported image-generation models, served by
# GET /available_image_models. (Removed an accidentally duplicated
# comment line that preceded this constant.)
AVAILABLE_IMAGE_MODELS = [
    {
        "id": "dall-e-3",
        "name": "DALL-E 3",
        "description": "最新的 DALL-E 模型，支持高质量图像生成，最大分辨率 1024x1024",
        "max_tokens": 1000,
        "sizes": ["1024x1024", "1792x1024", "1024x1792"],
        "quality": ["standard", "hd"]
    },
    {
        "id": "dall-e-2",
        "name": "DALL-E 2",
        "description": "较早的 DALL-E 模型，生成速度更快，支持多种尺寸",
        "max_tokens": 1000,
        "sizes": ["256x256", "512x512", "1024x1024"],
        "quality": ["standard"]
    }
]

@app.get("/", response_class=HTMLResponse)
async def root():
    try:
        with open("static/root.html", encoding="utf-8") as f:
            content = f.read()
        return content
    except Exception as e:
        print(f"Error reading root.html: {e}")
        return {"message": "Welcome to OCR API"}

@app.get("/upload", response_class=HTMLResponse)
async def upload_page():
    with open("static/index.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.get("/upload_pdf", response_class=HTMLResponse)
async def upload_pdf_page():
    with open("static/index_pdf.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.get("/upload_table", response_class=HTMLResponse)
async def upload_table_page():
    with open("static/index_table.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.get("/upload_ppt", response_class=HTMLResponse)
async def upload_ppt_page():
    with open("static/index_ppt.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.get("/word2image", response_class=HTMLResponse)
async def word2image_page():
    with open("static/index_word2image.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.get("/optimize", response_class=HTMLResponse)
async def optimize_page():
    with open("static/index_optimize.html", encoding="utf-8") as f:
        content = f.read()
    return content

@app.post("/ocr/", response_model=OCRResponse)
async def perform_ocr(file: UploadFile = File(...)):
    contents = await file.read()
    texts = []
    confidences = []

    if file.content_type == "application/pdf":
        pdf_stream = io.BytesIO(contents)
        try:
            pdf_document = fitz.open(stream=pdf_stream, filetype="pdf")
            for page_num in range(len(pdf_document)):
                page = pdf_document[page_num]
                pix = page.get_pixmap()
                img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                img_cv = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                result = ocr.ocr(img_cv, cls=True)
                if result:
                    for line in result[0]:
                        texts.append(line[1][0])
                        confidences.append(float(line[1][1]))
            pdf_document.close()
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))
    else:
        nparr = np.frombuffer(contents, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        result = ocr.ocr(img, cls=True)
        if result:
            for line in result[0]:
                texts.append(line[1][0])
                confidences.append(float(line[1][1]))

    if not texts:
        raise HTTPException(status_code=400, detail="未能别到任何文本")
    return OCRResponse(text=texts, confidence=confidences)

@app.post("/ocr_pdf/", response_model=OCRResponse)
async def perform_pdf_ocr(file: UploadFile = File(...)):
    try:
        contents = await file.read()
        pdf_stream = io.BytesIO(contents)
        texts = []
        confidences = []
        
        try:
            pdf_document = fitz.open(stream=pdf_stream, filetype="pdf")
            for page_num in range(len(pdf_document)):
                page = pdf_document[page_num]
                pix = page.get_pixmap()
                img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                img_cv = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                result = ocr.ocr(img_cv, cls=True)
                
                if result and result[0]:
                    for line in result[0]:
                        texts.append(line[1][0])
                        confidences.append(float(line[1][1]))
            
            pdf_document.close()
            
            if not texts:
                raise HTTPException(status_code=400, detail="未能识别到任何文本")
            
            return OCRResponse(text=texts, confidence=confidences)
            
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"PDF处理失败: {str(e)}")
            
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件处理失败: {str(e)}")

@app.post("/ocr_table/")
async def perform_table_ocr(file: UploadFile = File(...)):
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    
    try:
        result = ocr.ocr(img, cls=True)
        if not result or not result[0]:
            return {"error": "未识别到文本"}
            
        text_items = []
        for line in result[0]:
            box = line[0]
            text = line[1][0]
            confidence = line[1][1]
            y_center = (box[0][1] + box[2][1]) / 2
            x_center = (box[0][0] + box[2][0]) / 2
            text_items.append({
                'text': text,
                'y': y_center,
                'x': x_center,
                'confidence': confidence
            })
        
        y_tolerance = 10
        text_items.sort(key=lambda x: x['y'])
        
        rows = []
        current_row = []
        current_y = text_items[0]['y'] if text_items else 0
        
        for item in text_items:
            if abs(item['y'] - current_y) <= y_tolerance:
                current_row.append(item)
            else:
                if current_row:
                    current_row.sort(key=lambda x: x['x'])
                    rows.append(current_row)
                current_row = [item]
                current_y = item['y']
        
        if current_row:
            current_row.sort(key=lambda x: x['x'])
            rows.append(current_row)
        
        html_table = '<table border="1">\n'
        for row in rows:
            html_table += '<tr>\n'
            for cell in row:
                html_table += f'<td>{cell["text"]}</td>\n'
            html_table += '</tr>\n'
        html_table += '</table>'
        
        return {
            "tables": [{
                "html": html_table
            }]
        }
        
    except Exception as e:
        return {"error": str(e)}

@app.get("/models")
async def get_models():
    return {"models": AVAILABLE_MODELS}

@app.post("/optimize")
async def optimize_text(request: OptimizeRequest):
    client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
    
    try:
        response = client.chat.completions.create(
            model=request.model,
            messages=[
                {"role": "system", "content": request.instruction},
                {"role": "user", "content": request.text}
            ],
            temperature=0.7,
            max_tokens=1000
        )
        
        optimized_text = response.choices[0].message.content
        return {"optimized_text": optimized_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/analyze_ppt/")
async def analyze_ppt(request: PPTAnalyzeRequest):
    try:
        client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
        
        prompt = """请分析以下文本，将其转换为PPT大纲格式。要求：
1. 使用总-分-结构进行组织
2. 提取关键词
3. 为每个部分提供合适的配图建议
4. 精炼内容，突出重点

请按照以下JSON格式返回结果：
{
    "slides": [
        {
            "title": "标题",
            "content": "精炼后的内容",
            "image_suggestion": "配图建议",
            "keywords": ["关键词1", "关键词2"]
        }
    ]
}"""

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": request.text}
            ],
            temperature=0.7,
        )
        
        result = json.loads(response.choices[0].message.content)
        return result
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/generate_ppt/")
async def generate_ppt(ppt_content: PPTContent):
    try:
        prs = Presentation()
        prs.slide_width = Inches(16)
        prs.slide_height = Inches(9)
        
        for index, slide_info in enumerate(ppt_content.slides):
            slide_layout = prs.slide_layouts[1]
            slide = prs.slides.add_slide(slide_layout)
            
            title = slide.shapes.title
            title.text = slide_info.title
            
            if len(slide.placeholders) > 1:
                content = slide.placeholders[1]
                tf = content.text_frame
                tf.clear()
                
                p = tf.add_paragraph()
                p.text = slide_info.content
                
                p = tf.add_paragraph()
                p.text = "\n关键词：" + "、".join(slide_info.keywords)
                
                p = tf.add_paragraph()
                p.text = "\n建议配图：" + slide_info.image_suggestion
            
            if slide_info.image_url:
                try:
                    image_response = requests.get(slide_info.image_url)
                    image_response.raise_for_status()
                    image_stream = BytesIO(image_response.content)
                    slide.shapes.add_picture(image_stream, Inches(1), Inches(2), width=Inches(4))
                except Exception as e:
                    print(f"Failed to download or insert image: {e}")
        
        ppt_stream = BytesIO()
        prs.save(ppt_stream)
        ppt_stream.seek(0)
        
        return StreamingResponse(
            ppt_stream,
            media_type="application/vnd.openxmlformats-officedocument.presentationml.presentation",
            headers={
                'Content-Disposition': 'attachment; filename="generated_slides.pptx"'
            }
        )
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/available_image_models")
async def get_available_image_models():
    return {"models": AVAILABLE_IMAGE_MODELS}

@app.post("/generate_image/")
async def generate_image(request: ImageGenerationRequest):
    try:
        if not request.prompt:
            raise HTTPException(status_code=400, detail="未供图片描述")
            
        client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
        
        try:
            response = client.images.generate(
                model=request.model,
                prompt=request.prompt,
                n=1,
                size="1024x1024",
                quality="standard",
                response_format="url"
            )
            
            image_url = response.data[0].url
            return {"image_url": image_url}
            
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"图片生成失败: {str(e)}")
            
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/api_ali", response_class=HTMLResponse)
async def api_ali_page():
    with open("static/index_api_ali.html", encoding="utf-8") as f:
        content = f.read()
    return content

class AliImageRequest(BaseModel):
    """Request payload for POST /generate_ali_image (Alibaba DashScope)."""
    prompt: str  # image description sent to the model
    model: str  # DashScope model id, e.g. "flux-schnell"
    api_key: str  # caller-supplied DashScope API key
    base_url: str = "https://dashscope.aliyuncs.com/api/v1"  # currently unused by the handler
    
@app.post("/generate_ali_image")
async def generate_ali_image(request: AliImageRequest):
    try:
        if request.model == "flux-schnell":
            # 提交异步任务
            submit_url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text2image/image-synthesis"
            request_body = {
                "model": "flux-schnell",
                "input": {
                    "prompt": request.prompt
                },
                "parameters": {
                    "size": "1024*1024",
                    "steps": 4,  # flux-schnell 默认步数
                    "seed": 1234,
                    "guidance": 3.5
                }
            }

            headers = {
                "Authorization": f"Bearer {request.api_key}",
                "Content-Type": "application/json",
                "X-DashScope-Async": "enable"  # 启用异步模式
            }

            # 提交任务
            response = requests.post(submit_url, json=request_body, headers=headers)
            response.raise_for_status()
            task_data = response.json()
            
            if "output" not in task_data or "task_id" not in task_data["output"]:
                raise Exception("未获取到任务ID")
            
            task_id = task_data["output"]["task_id"]
            
            # 轮询任务状态
            max_retries = 30  # 最大重试次数
            retry_interval = 2  # 重试间隔（秒）
            
            for _ in range(max_retries):
                # 查询任务状态
                status_url = f"https://dashscope.aliyuncs.com/api/v1/tasks/{task_id}"
                status_response = requests.get(
                    status_url,
                    headers={"Authorization": f"Bearer {request.api_key}"}
                )
                status_response.raise_for_status()
                status_data = status_response.json()
                
                task_status = status_data["output"]["task_status"]
                
                if task_status == "SUCCEEDED":
                    # 任务成功完成
                    return status_data
                elif task_status == "FAILED":
                    # 任务失败
                    error_message = status_data.get("output", {}).get("message", "任务执行失败")
                    raise Exception(error_message)
                elif task_status in ["PENDING", "RUNNING"]:
                    # 任务仍在进行中，等待后重试
                    time.sleep(retry_interval)
                else:
                    raise Exception(f"未知的任务状态: {task_status}")
            
            raise Exception("任务执行超时")

        else:
            # 通义千问模型的处理逻辑保持不变
            endpoint = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
            request_body = {
                "model": request.model,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": f"Generate an image based on this description: {request.prompt}. Please generate the image directly without any explanation."
                            }
                        ]
                    }
                ],
                "parameters": {
                    "result_format": "message",
                    "seed": 1234,
                    "max_tokens": 2000,
                    "top_p": 0.8,
                    "temperature": 0.8
                }
            }

            headers = {
                "Authorization": f"Bearer {request.api_key}",
                "Content-Type": "application/json"
            }

            response = requests.post(endpoint, json=request_body, headers=headers)
            response.raise_for_status()
            return response.json()

    except Exception as e:
        print(f"Error details: {str(e)}")  # 调试日志
        raise HTTPException(status_code=500, detail=str(e))

class AliChatRequest(BaseModel):
    """Request payload for POST /generate_ali_chat (Alibaba DashScope chat)."""
    prompt: str  # user message forwarded to the model
    model: str  # requested model id (the handler currently pins qwq-32b-preview)
    api_key: str  # caller-supplied DashScope API key
    base_url: str = "https://dashscope.aliyuncs.com/api/v1"  # currently unused by the handler

@app.post("/generate_ali_chat")
async def generate_ali_chat(request: AliChatRequest):
    try:
        endpoint = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
        request_body = {
            "model": "qwq-32b-preview",  # 固定使用 qwq-32b-preview 模型
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": request.prompt
                        }
                    ]
                }
            ],
            "parameters": {
                "result_format": "message",
                "seed": 1234,
                "max_tokens": 2000,
                "top_p": 0.8,
                "temperature": 0.8
            }
        }

        headers = {
            "Authorization": f"Bearer {request.api_key}",
            "Content-Type": "application/json"
        }

        print(f"Sending chat request to: {endpoint}")  # 调试日志
        print(f"Request body: {request_body}")    # 调试日志
        
        response = requests.post(endpoint, json=request_body, headers=headers)
        response.raise_for_status()
        return response.json()

    except Exception as e:
        print(f"Error details: {str(e)}")  # 调试日志
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)