from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import httpx
from openai import OpenAI, AsyncOpenAI, APIStatusError
import json
from scipy import stats
from statsmodels.stats.proportion import proportions_ztest
import numpy as np
import asyncio
import os

app = FastAPI()

# --- CORS middleware setup ---
origins = [
    "http://localhost",
    "http://localhost:8080", # if you use a live server or similar tooling
    "null", # allow requests from file:// (for local HTML files)
    "*" # allow all origins in development; production should be stricter
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"], # allow all methods (GET, POST, OPTIONS, etc.)
    allow_headers=["*"], # allow all headers
)


# --- Helper functions migrated from viral_predictor.py ---

def calc_confidence(reader, vote_a, vote_b):
    """Decide which version (A or B) leads on a metric and with what confidence.

    Runs a one-sided, pooled two-proportion z-test on the two vote counts
    (each out of ``reader`` observations). This reproduces statsmodels'
    ``proportions_ztest(..., alternative='larger')`` using only scipy, which
    the module already imports.

    Args:
        reader: Number of simulated readers that have voted so far (same for
            both versions).
        vote_a: Positive votes for version A.
        vote_b: Positive votes for version B.

    Returns:
        Tuple ``(winner, confidence)`` where winner is "A", "B" or "-" and
        confidence is a percentage in [0, 100].
    """
    # Degenerate cases: no information at all, or a shutout for one side.
    if vote_a == 0 and vote_b == 0:
        return "-", 0.0
    if vote_a == 0:
        return "B", 100.0
    if vote_b == 0:
        return "A", 100.0

    # Orient the test so the (weak) leader is tested as "larger"; ties go to A,
    # matching the original's `vote_a >= vote_b` branch.
    winner, hi, lo = ("A", int(vote_a), int(vote_b)) if vote_a >= vote_b else ("B", int(vote_b), int(vote_a))

    try:
        n = int(reader)
        if n <= 0:
            # No observations to test against (mirrors the original nobs==0 guard).
            return ("A" if vote_a > vote_b else "B"), 50.0

        # Pooled two-proportion z-test, H1: p_hi > p_lo (one-sided).
        p_hi = hi / n
        p_lo = lo / n
        p_pool = (hi + lo) / (2 * n)
        variance = p_pool * (1 - p_pool) * (2 / n)
        if variance <= 0:
            # Both proportions are exactly 1: the statistic is undefined
            # (the original produced NaN here and fell back to 50%).
            return winner, 50.0

        z = (p_hi - p_lo) / np.sqrt(variance)
        # p_value = probability of seeing this lead if there were no real
        # difference, so (1 - p_value) is the confidence in the leader.
        p_value = float(stats.norm.sf(z))
        confidence = (1 - p_value) * 100
        return winner, confidence if not np.isnan(confidence) else 50.0
    except Exception as e:
        print(f"Error calculating confidence: {e}")
        # On any numerical failure fall back to the naive vote share.
        total_votes = vote_a + vote_b
        if total_votes == 0:
            return "-", 0.0
        if vote_a > vote_b:
            return "A", (vote_a / total_votes) * 100
        elif vote_b > vote_a:
            return "B", (vote_b / total_votes) * 100
        else:
            return "-", 50.0 # Votes are equal

async def get_prediction(client: AsyncOpenAI, prompt: str, model: str):
    """Ask the LLM to role-play one reader and return its engagement decision.

    Sends ``prompt`` to ``model`` in JSON mode and parses the reply into a
    dict expected to carry boolean fields "like", "comment", "share",
    "quote". Every failure path (API error, timeout, unparseable reply)
    degrades to an all-False decision instead of raising, so one bad call
    never aborts the simulation.
    """
    # Safe default returned on every failure path (defined once, not five times).
    default = {"like": False, "comment": False, "share": False, "quote": False}
    try:
        completion = await client.chat.completions.create(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=1.0,
            presence_penalty=2,
            response_format={"type": "json_object"}
        )
        if not completion or not completion.choices or not completion.choices[0].message:
            print("API returned empty result, using default values")
            return default

        prediction_content = completion.choices[0].message.content
        try:
            return json.loads(prediction_content)
        except json.JSONDecodeError:
            print(f"JSONDecodeError, using default values. Raw response: {prediction_content[:200]}")
            # The model sometimes wraps the JSON in prose; salvage the span
            # between the first '{' and the last '}' and retry once.
            try:
                start_index = prediction_content.find('{')
                end_index = prediction_content.rfind('}')
                if start_index != -1 and end_index != -1 and start_index < end_index:
                    cleaned_content = prediction_content[start_index:end_index+1]
                    return json.loads(cleaned_content)
                else:
                    raise json.JSONDecodeError("No valid JSON object found", prediction_content, 0)
            except json.JSONDecodeError:
                 print(f"Could not parse JSON even after cleaning. Raw response: {prediction_content[:200]}")
                 return default

    except APIStatusError as e:
        print(f"API Status Error: {e.status_code} - {e.response.text}")
        return default
    except httpx.TimeoutException:
        # Broader than the original ConnectTimeout: TimeoutException also covers
        # read/write/pool timeouts, which previously fell through to the generic
        # handler with a misleading "Unknown error" message.
        print("API request timed out, using default values")
        return default
    except Exception as e:
        print(f"Unknown error in get_prediction: {str(e)}")
        return default

# --- API model definitions ---

class PredictRequest(BaseModel):
    """Query parameters for the /predict endpoint (bound via Depends())."""
    version_a: str  # content variant A to simulate
    version_b: str  # content variant B to simulate
    platform: str   # social platform name injected into the role-play prompt
    max_reader: int = 16  # total number of simulated readers
    # api_key: str # removed: API key is no longer taken from the client
    model: str = "deepseek-chat"
    batch_size: int = 3 # number of predictions processed in parallel per round

class PredictionResult(BaseModel):
    """Schema for a full simulation result snapshot.

    NOTE(review): not referenced by the visible endpoints (the SSE stream
    builds plain dicts instead) — kept as documentation of the payload shape.
    """
    reader: int  # number of simulated readers processed
    # Cumulative engagement counts for version A.
    like_a: int
    comment_a: int
    share_a: int
    quote_a: int
    total_a: int
    # Cumulative engagement counts for version B.
    like_b: int
    comment_b: int
    share_b: int
    quote_b: int
    total_b: int
    # Per-metric winner ("A"/"B"/"-") and confidence percentage.
    like_winner: str
    like_confidence: float
    comment_winner: str
    comment_confidence: float
    share_winner: str
    share_confidence: float
    quote_winner: str
    quote_confidence: float
    engagement_a: list[int] # for the chart
    engagement_b: list[int] # for the chart
    user_steps: list[int]   # for the chart

# --- API endpoints ---

from fastapi import Depends # Add Depends for query parameter model validation

@app.get("/predict") # GET (not POST) so the browser EventSource API can connect directly
async def predict_stream_get(request_data: PredictRequest = Depends(), http_request: Request = None):
    """Stream an A/B virality simulation as Server-Sent Events.

    Query parameters are validated through PredictRequest via Depends().
    Emits one "update" event per simulated reader, then a "final" summary,
    then a "done" sentinel. Predictions are fanned out ``batch_size`` readers
    at a time with asyncio.gather.
    """

    async def event_generator():
        reader = 0
        like_a = comment_a = share_a = quote_a = total_a = 0
        like_b = comment_b = share_b = quote_b = total_b = 0
        engagement_a_history = [0]
        engagement_b_history = [0]
        user_steps_history = [0]
        # Pre-seed the confidence outputs so the "final" payload is valid even
        # if the loop body never runs (max_reader <= 0, or the client
        # disconnects immediately). The original raised NameError here.
        like_winner = comment_winner = share_winner = quote_winner = "-"
        like_confidence = comment_confidence = share_confidence = quote_confidence = 0.0

        # SECURITY: prefer the environment variable. The literal fallback keeps
        # existing deployments working but should be removed and the key rotated
        # — it is a secret committed to source.
        api_key = os.environ.get("DEEPSEEK_API_KEY", "sk-4a4b77b1657c4c4aade84d3f8374df9b")
        client = AsyncOpenAI(
            base_url="https://api.deepseek.com",
            api_key=api_key,
            timeout=30.0,
        )

        prompt_a = f"""You are scrolling through {request_data.platform} and came across the following content:
        '''
        {request_data.version_a}
        '''
        Decide whether to like, comment, share (retweet, repost, etc.), or quote.
        Output your decision STRICTLY as a JSON object with the following fields ONLY:
        - "like": boolean
        - "comment": boolean
        - "share": boolean
        - "quote": boolean
        Ensure the output is ONLY the JSON object and nothing else."""

        prompt_b = f"""You are scrolling through {request_data.platform} and came across the following content:
        '''
        {request_data.version_b}
        '''
        Decide whether to like, comment, share (retweet, repost, etc.), or quote.
        Output your decision STRICTLY as a JSON object with the following fields ONLY:
        - "like": boolean
        - "comment": boolean
        - "share": boolean
        - "quote": boolean
        Ensure the output is ONLY the JSON object and nothing else."""

        try:
            while reader < request_data.max_reader:
                # Stop early if the client disconnected. Guard against the
                # default None (the original dereferenced it unconditionally).
                if http_request is not None and await http_request.is_disconnected():
                    break

                # Number of readers to process this round (never past max_reader).
                remaining_readers = request_data.max_reader - reader
                current_batch_size = min(request_data.batch_size, remaining_readers)

                # Fan out predictions for A and B in parallel, interleaved A,B,A,B,...
                tasks = []
                for _ in range(current_batch_size):
                    tasks.append(get_prediction(client, prompt_a, request_data.model))
                    tasks.append(get_prediction(client, prompt_b, request_data.model))

                results = await asyncio.gather(*tasks)

                # Consume results pairwise: results[i] is A's vote, results[i+1] is B's.
                for i in range(0, len(results), 2):
                    pred_a = results[i]
                    pred_b = results[i+1]
                    reader += 1

                    current_like_a = pred_a.get("like", False)
                    current_comment_a = pred_a.get("comment", False)
                    current_share_a = pred_a.get("share", False)
                    current_quote_a = pred_a.get("quote", False)

                    current_like_b = pred_b.get("like", False)
                    current_comment_b = pred_b.get("comment", False)
                    current_share_b = pred_b.get("share", False)
                    current_quote_b = pred_b.get("quote", False)

                    like_a += current_like_a
                    comment_a += current_comment_a
                    share_a += current_share_a
                    quote_a += current_quote_a
                    total_a = like_a + comment_a + share_a + quote_a

                    like_b += current_like_b
                    comment_b += current_comment_b
                    share_b += current_share_b
                    quote_b += current_quote_b
                    total_b = like_b + comment_b + share_b + quote_b

                    engagement_a_history.append(total_a)
                    engagement_b_history.append(total_b)
                    user_steps_history.append(reader)

                    # --- Confidence for the current step ---
                    like_winner, like_confidence = calc_confidence(reader, like_a, like_b)
                    comment_winner, comment_confidence = calc_confidence(reader, comment_a, comment_b)
                    share_winner, share_confidence = calc_confidence(reader, share_a, share_b)
                    quote_winner, quote_confidence = calc_confidence(reader, quote_a, quote_b)

                    # --- Assemble the incremental SSE payload ---
                    stream_data = {
                        "type": "update",
                        "reader": reader,
                        "prediction_a": pred_a,
                        "prediction_b": pred_b,
                        "cumulative": {
                            "like_a": like_a, "comment_a": comment_a, "share_a": share_a, "quote_a": quote_a, "total_a": total_a,
                            "like_b": like_b, "comment_b": comment_b, "share_b": share_b, "quote_b": quote_b, "total_b": total_b,
                        },
                        "confidence": {
                            "like": {"winner": like_winner, "value": like_confidence},
                            "comment": {"winner": comment_winner, "value": comment_confidence},
                            "share": {"winner": share_winner, "value": share_confidence},
                            "quote": {"winner": quote_winner, "value": quote_confidence},
                        },
                        "chart_data": {
                            "engagement_a": engagement_a_history,
                            "engagement_b": engagement_b_history,
                            "user_steps": user_steps_history
                        }
                    }
                    # Emit one SSE event; a short pause smooths the front-end animation.
                    yield f"data: {json.dumps(stream_data)}\n\n"
                    await asyncio.sleep(0.1)

            # --- After the loop: send the final result and the done sentinel ---
            final_data = {
                "type": "final",
                "reader": reader,
                "cumulative": {
                    "like_a": like_a, "comment_a": comment_a, "share_a": share_a, "quote_a": quote_a, "total_a": total_a,
                    "like_b": like_b, "comment_b": comment_b, "share_b": share_b, "quote_b": quote_b, "total_b": total_b,
                },
                "confidence": {
                    "like": {"winner": like_winner, "value": like_confidence},
                    "comment": {"winner": comment_winner, "value": comment_confidence},
                    "share": {"winner": share_winner, "value": share_confidence},
                    "quote": {"winner": quote_winner, "value": quote_confidence},
                },
                "chart_data": {
                    "engagement_a": engagement_a_history,
                    "engagement_b": engagement_b_history,
                    "user_steps": user_steps_history
                }
            }
            yield f"data: {json.dumps(final_data)}\n\n"
            yield f"data: {json.dumps({'type': 'done'})}\n\n"

        except asyncio.CancelledError:
            # Client went away mid-stream; nothing to report.
            pass
        except Exception as e:
            print(f"ERROR in stream generator: {e}")
            # Best effort: tell the client what happened.
            error_data = {"type": "error", "message": str(e)}
            try:
                 yield f"data: {json.dumps(error_data)}\n\n"
            except Exception: # yielding itself may fail if the client is already gone
                 pass
        finally:
            # Release the HTTP connection pool held by the OpenAI client.
            await client.close()

    return StreamingResponse(event_generator(), media_type="text/event-stream")

# Root path for checking that the server is running
@app.get("/")
def read_root():
    """Health-check endpoint confirming the backend is up."""
    payload = {"message": "Viral Predictor Backend is running"}
    return payload

# Run the server (normally via uvicorn on the command line)
# if __name__ == "__main__":
#     import uvicorn
#     uvicorn.run(app, host="0.0.0.0", port=8000)