import json
import logging
import math
import os
import time
from typing import List

from fastapi import APIRouter, BackgroundTasks, Depends
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer, SentencesDataset, losses, util
from sqlalchemy.orm import Session
from starlette import status
from starlette.responses import JSONResponse
from torch.utils.data import DataLoader

from api.database.curd import get_sample, find_standrad_text, find_model_path, models_list, create_model
from api.database.database import get_db
from api.database.models import ModelEntity
from api.fastwebsocket import send_message_to_client
from api.utils import model_path, get_input_example, rootpath

router = APIRouter()


class TrainItem(BaseModel):
    """Request body for submitting a model fine-tuning job (POST /train)."""

    # Pre-trained model file name; train_model falls back to "all-MiniLM-L6-v2" when empty.
    premodel_name: str | None = Field(..., description="模型文件名", title="这是什么")
    # Name of the sample set to look up in the database.
    sample_name: str
    # Owner of the model; train_model falls back to "root" when missing.
    user_name: str | None = None
    # Number of training epochs.
    num: int = 20
    # Name to save the fine-tuned model under; defaults to premodel_name downstream.
    new_model_name: str | None = None
    # WebSocket client id used to push progress messages (optional).
    client_id: str | None = None


def simple_callback(score: float, epoch: int, steps: int) -> None:
    """Progress callback handed to ``model.fit``.

    Args:
        score: Evaluation score reported by sentence-transformers.
        epoch: Current epoch index.
        steps: Current step count within the epoch.
    """
    # Lazy %-style args avoid formatting work when INFO is disabled;
    # also log `steps`, which the original message silently dropped.
    logging.info("score: %s, epoch: %s, steps: %s", score, epoch, steps)


async def print_train_message(client_id: str, message: str):
    """Echo *message* locally and push it to the given WebSocket client.

    Does nothing when *client_id* is empty/None.
    """
    if not client_id:
        return
    payload = json.dumps({
        "message": message,
        "type": 'trainmodel',
    })
    print(message)
    await send_message_to_client(client_id, payload)


async def train_model(item: TrainItem, db: Session):
    """Fine-tune a SentenceTransformer on a user's sample set and persist it.

    Runs as a FastAPI background task; progress and errors are pushed to the
    submitting client via ``print_train_message``.

    Args:
        item: Training parameters (pre-model name, sample name, epochs, ...).
        db: SQLAlchemy session for sample/model lookups and persistence.
    """
    start_time = time.perf_counter()

    # Normalize: with no pre-model (or the stock model) or no user, fall back
    # to the shared base model stored under the "root" user.
    user_name = item.user_name
    if not item.premodel_name or item.premodel_name == "all-MiniLM-L6-v2" or not user_name:
        item.premodel_name = "all-MiniLM-L6-v2"
        user_name = "root"

    model_file_name = os.path.join(model_path(), user_name)
    if not os.path.exists(model_file_name):
        os.makedirs(model_file_name)

    model_file_name = os.path.join(model_file_name, item.premodel_name)
    if not os.path.exists(model_file_name):
        await print_train_message(item.client_id, f"无效的预训练模型:{model_file_name}")
        return

    # Locate the sample file registered under this sample name.
    # NOTE(review): lookup uses the raw item.user_name (possibly None), while
    # model paths use the normalized user_name — confirm get_sample handles None.
    sample = get_sample(db, item.sample_name, item.user_name)
    if not sample:
        await print_train_message(item.client_id, "未找到样本信息")
        return

    sample_file_name = os.path.join(rootpath(), sample.path)
    if not os.path.exists(sample_file_name):
        await print_train_message(item.client_id, "未找到样本文件")
        return

    train_examples = get_input_example(sample_file_name)
    if not train_examples:
        await print_train_message(item.client_id, "无效的样本文件")
        return

    model = SentenceTransformer(model_file_name)

    train_dataset = SentencesDataset(train_examples, model)

    train_batch_size = 16
    num_epochs = item.num

    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)

    # 10% of train data for warm-up, per sentence-transformers convention.
    warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)

    train_loss = losses.CosineSimilarityLoss(model)

    # Without an explicit new name, overwrite the pre-model in place and skip
    # creating a database record for it.
    is_common_model = False
    if not item.new_model_name:
        item.new_model_name = item.premodel_name
        is_common_model = True

    # BUGFIX: use the normalized `user_name` (never None) instead of
    # item.user_name, which crashed os.path.join when the field was omitted.
    new_model_save_path = os.path.join(model_path(), user_name, item.new_model_name)

    # Tune the model
    model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=num_epochs,
              warmup_steps=warmup_steps, output_path=new_model_save_path, callback=simple_callback)

    # Persist the new model record (with a path relative to the project root)
    # unless we overwrote the base model in place.
    if not is_common_model:
        new_model_save_path = os.path.relpath(new_model_save_path, rootpath())
        # BUGFIX: record the normalized owner so the stored row matches the
        # directory the model was actually saved under.
        model_entity = ModelEntity(user_name=user_name, premodel_name=item.new_model_name,
                                   path=new_model_save_path)
        create_model(model_entity, db)

    end_time = time.perf_counter()
    # BUGFIX: removed a stray "$" (JS template-literal leftover) that was
    # printed literally inside the f-string.
    logging.info(f"已完成训练 共耗时{end_time - start_time}秒")
    log_message = f" 新模型{item.new_model_name} 样本为{item.sample_name} 训练完成"
    await print_train_message(item.client_id, log_message)


@router.post('/train')
async def sample_train(item: TrainItem, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Queue a fine-tuning job and return immediately.

    Training runs as a background task so the HTTP request is not blocked;
    completion is reported over the client's WebSocket.
    """
    background_tasks.add_task(train_model, item, db)
    return JSONResponse(
        content={"message": "已提交训练"},
        status_code=status.HTTP_200_OK,
    )


# 获取模型列表
@router.get('/list')
def model_list(user_name: str, db: Session = Depends(get_db)):
    """Return the user's models as a JSON string in the `message` field.

    An empty string means the user has no models.
    """
    models = models_list(user_name, db)
    if models:
        # The list is serialized into a string so the response shape
        # {"message": <str>} matches the empty-models branch.
        payload = json.dumps(jsonable_encoder(models))
    else:
        payload = ""
    return JSONResponse(content={"message": payload}, status_code=status.HTTP_200_OK)


class ReasonItem(BaseModel):
    """Request body for the /reasoning similarity endpoint."""

    # Name of the standard-text corpus; falls back to "standrad" when empty.
    standrad_name: str
    # Model name; resolved via the DB, else the root user's model folder.
    premodel_name: str
    # Owner used for the model lookup (optional).
    # BUGFIX: annotated `str | None` (was `str = None`), matching TrainItem's
    # optional-field style and the actual accepted values.
    user_name: str | None = None
    # Texts to score against the corpus.
    text_list: list[str]
    # Number of top matches to return (optional).
    # BUGFIX: annotated `int | None` (was `int = None`).
    top: int | None = None


class ScoreItem(BaseModel):
    """A single similarity hit: a matched standard text and its score."""

    # Matched text from the standard corpus.
    text: str
    # Similarity score reported by util.semantic_search.
    score: float


class ResultItem(BaseModel):
    """Similarity results for one query text."""

    # The query text as submitted.
    origin_name: str
    # Best-matching standard texts with their scores.
    target_list: list[ScoreItem]


@router.post('/reasoning', response_model=list[ResultItem],
             name='文本相似度推理计算')
async def sample_reasoning(item: ReasonItem, db: Session = Depends(get_db)):
    """
        通过给定的文本列表计算文本相似度。

        **请求体:**
        - `standrad_name` (str): 标准库名，必须唯一。
        - `premodel_name` (str): 预训练的模型名,如果为空,会默认最原始的模型。
        - `user_name` (str, 可选): 用户名。
        - `text_list` (本文列表)
        - `top`(int,可选）默认为5，返回最相似的top条数据

        **响应:**
        - `results` (list[ResultItem])  返回ResultItem的列表，具体看ResultItem说明
        """
    # Fall back to the default corpus name when none is given.
    if not item.standrad_name:
        item.standrad_name = "standrad"
    standrad_text_list = find_standrad_text(db, item.standrad_name)

    if len(standrad_text_list) == 0:
        return JSONResponse(content={"message": f"未找到{item.standrad_name}标准库"},
                            status_code=status.HTTP_400_BAD_REQUEST)

    # Resolve the model path from the DB; fall back to the root user's folder.
    # BUGFIX: build the fallback with os.path.join instead of hard-coded
    # Windows backslashes, which produced a broken path on POSIX systems.
    model_path1 = find_model_path(item.user_name, item.premodel_name, db)
    if not model_path1:
        model_path1 = os.path.join(rootpath(), "data", "models", "root", item.premodel_name)

    if not os.path.exists(model_path1):
        # BUGFIX: report the missing model name (was the corpus name).
        return JSONResponse(content={"message": f"未找到预训练库:{item.premodel_name}"},
                            status_code=status.HTTP_400_BAD_REQUEST)

    model = SentenceTransformer(model_path1)

    corpus_embeddings = model.encode(standrad_text_list, convert_to_tensor=True)

    # BUGFIX: honor the request's `top` parameter; it was previously ignored
    # and top_k was hard-coded. Keeps the historical default of 5.
    top_k = item.top if item.top else 5

    reason_result_item = []
    for text in item.text_list:
        query_embedding = model.encode(text, convert_to_tensor=True)
        # util.semantic_search runs the scoring in-library, which is far
        # faster than computing pairwise similarities by hand.
        hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)

        hits = hits[0]  # Get the hits for the first (only) query
        score_items: list[ScoreItem] = [
            ScoreItem(text=standrad_text_list[hit['corpus_id']], score=hit['score'])
            for hit in hits
        ]

        result_item = ResultItem(origin_name=text, target_list=score_items)
        reason_result_item.append(result_item)

    return reason_result_item
