from datetime import datetime

import pandas as pd
from tqdm import tqdm

import time
from configs import app_config
from langchain_workflow.models import ProbabilityResultModel, ProductModel
from langchain_workflow.node import get_product_status, rag, remove_promotional_content,timing_decorator
from logger import logger


def main_workflow(product_model: ProductModel) -> ProbabilityResultModel:
    """Run the full moderation pipeline for one product.

    Steps:
      1. Strip promotional/subjective content from the merged
         title + description.
      2. Retrieve the relevant prohibited-goods policy text via RAG.
      3. Classify the product against the retrieved policy.

    Args:
        product_model: Product to evaluate (id, title, description).

    Returns:
        The classification/probability result produced by
        ``get_product_status``.
    """
    logger.info(f"开始处理商品 - {product_model}")
    # Merge title and description; remove ads and subjective content.
    product_remove_promotional_content_model = remove_promotional_content(product_model)
    # Bug fix: the message says "text length" but the original logged the
    # full text — log len() so the message matches what is emitted.
    logger.info(
        f"合并后的文本长度: {len(product_remove_promotional_content_model.rag_text)}"
    )
    # Retrieve the policy snippet most relevant to the cleaned text.
    prohibited_policy: str = rag(product_remove_promotional_content_model.rag_text)
    logger.info(f"RAG到的政策:\n{prohibited_policy}")
    # Final verdict: classify the product against the retrieved policy.
    result = get_product_status(
        product_remove_promotional_content_model, prohibited_policy
    )
    return result


def statis(results_df, validation_df):
    """Compute, log, and persist evaluation results for two binary labels.

    For ``is_prohibited`` and ``requires_compliance_check``, compares the
    predictions in ``results_df`` against the ground truth in
    ``validation_df``: logs F1/precision/recall plus positive-class
    ratios, logs an unweighted two-label average, and writes the
    mispredicted rows (badcases) of each label to a CSV under ``data/``.

    NOTE(review): the per-label metrics compare the two frames
    positionally, not joined on ``pd_id`` — the caller in this file
    builds ``results_df`` in ``validation_df`` row order, so they align;
    confirm that invariant before reusing this function elsewhere.

    Args:
        results_df: Predictions; must contain ``pd_id`` and both labels.
        validation_df: Ground truth with the same columns.
    """
    from sklearn.metrics import f1_score, precision_score, recall_score

    def _label_metrics(label: str) -> dict:
        # F1/precision/recall and positive-rate for one binary label.
        y_true = validation_df[label].astype(int)
        y_pred = results_df[label].astype(int)
        return {
            "f1": f1_score(y_true, y_pred),
            "precision": precision_score(y_true, y_pred),
            "recall": recall_score(y_true, y_pred),
            "true_ratio": y_true.mean(),
            "pred_ratio": y_pred.mean(),
        }

    def _log_metrics(label: str, m: dict) -> None:
        # Emit one label's metric block (same lines as the original code).
        logger.info("-" * 50)
        logger.info(f"Metrics for {label}:")
        logger.info(f"F1-score: {m['f1']:.4f}")
        logger.info(f"Precision: {m['precision']:.4f}")
        logger.info(f"Recall: {m['recall']:.4f}")
        logger.info(f"True positive ratio: {m['true_ratio']:.4f}")
        logger.info(f"Predicted positive ratio: {m['pred_ratio']:.4f}")

    prohibited = _label_metrics("is_prohibited")
    compliance = _label_metrics("requires_compliance_check")

    _log_metrics("is_prohibited", prohibited)
    _log_metrics("requires_compliance_check", compliance)

    # Overall metrics: simple (unweighted) mean of the two labels' scores.
    logger.info("-" * 50)
    logger.info("Overall metrics:")
    logger.info(f"F1-score: {(prohibited['f1'] + compliance['f1']) / 2:.4f}")
    logger.info(
        f"Precision: {(prohibited['precision'] + compliance['precision']) / 2:.4f}"
    )
    logger.info(f"Recall: {(prohibited['recall'] + compliance['recall']) / 2:.4f}")

    # Join predictions to truth on pd_id so mispredictions can be inspected
    # side by side (suffixes distinguish the two sources).
    merged_df = pd.merge(
        results_df, validation_df, on="pd_id", suffixes=("_pred", "_true")
    )

    def _save_badcases(label: str, filename: str) -> None:
        # Persist the rows where prediction != truth for one label.
        badcases = merged_df[merged_df[f"{label}_pred"] != merged_df[f"{label}_true"]]
        badcases.to_csv(filename, index=False)
        logger.info(f"{label}预测错误的badcase已保存到: {filename}")

    _save_badcases("is_prohibited", "data/is_prohibited_badcases.csv")
    _save_badcases("requires_compliance_check", "data/compliance_check_badcases.csv")


def test():
    """Evaluate the workflow on a validation sample and report metrics.

    Loads the full answer sheet, runs ``main_workflow`` for every product
    listed in the chosen validation CSV, saves the predictions to a
    timestamped CSV under ``data/``, and prints metrics via ``statis``.
    """
    # Timing starts here so the total includes file-loading time.
    start_time = time.time()

    df = pd.read_csv(app_config.ANSWER_SHEET_PATH)
    # NOTE(review): despite the name, the "300" branch currently loads the
    # 100-row sample; the 300-row path is commented out below — confirm
    # which sample is actually intended.
    is_300 = True
    if is_300:
        validation_df = pd.read_csv("validation/validation100.csv")
        # validation_df = pd.read_csv("/root/SHAI_MXZ/CrossBorderAIChallenge/validation/validation_sample_300.csv")
    else:
        validation_df = pd.read_csv("validation/validation3.csv")

    results = []
    for index, row in tqdm(
        validation_df.iterrows(), total=len(validation_df), desc="处理商品"
    ):
        pd_id = row["pd_id"]
        # Fetch the product's title and description with a single scan of
        # the answer sheet (the original scanned it twice per product).
        product_row = df.loc[df["pd_id"] == pd_id].iloc[0]
        result = {"pd_id": pd_id}
        logger.info("----------------------------------")
        # Build the product model consumed by the workflow.
        product_model = ProductModel(
            pd_id=pd_id,
            pd_title=product_row["pd_title"],
            pd_desc=product_row["pd_desc"],
        )
        result_model: ProbabilityResultModel = main_workflow(product_model)
        result.update(result_model.model_dump())
        logger.info("----------------------------------")
        logger.info(f"处理完成: {result}")
        results.append(result)

    results_df = pd.DataFrame(results)
    results_filename = f"data/test_300_bm25_vector_rerank_{app_config.RERANCK_TOP_K}_{datetime.now().strftime('%Y-%m-%d_%H')}.csv"
    results_df.to_csv(results_filename, index=False)
    logger.info(f"结果已保存到: {results_filename}")
    # Log the total elapsed time only after the results file is written, so
    # it genuinely includes saving time (the original logged it early and
    # left an unused end_time1 variable behind).
    logger.info(f"处理完成，总时间: {time.time() - start_time:.2f} 秒")
    statis(results_df, validation_df)

@timing_decorator
def main():
    """Run the moderation workflow over the full answer sheet.

    Reads every product from the configured answer sheet, classifies each
    one via ``main_workflow``, and writes all predictions to a
    timestamped CSV under ``data/``.
    """
    products_df = pd.read_csv(app_config.ANSWER_SHEET_PATH)

    records = []
    for _, product_row in tqdm(
        products_df.iterrows(), total=len(products_df), desc="处理商品"
    ):
        logger.info("----------------------------------")
        # Assemble the workflow's input model from the current row.
        model = ProductModel(
            pd_id=product_row.pd_id,
            pd_title=product_row.pd_title,
            pd_desc=product_row.pd_desc,
        )
        outcome: ProbabilityResultModel = main_workflow(model)
        # One output record per product: its id plus the model's fields.
        record = {"pd_id": product_row.pd_id, **outcome.model_dump()}
        logger.info("----------------------------------")
        logger.info(f"处理完成: {record}")
        records.append(record)

    output_df = pd.DataFrame(records)
    output_path = f"data/bm25_vector_rerank_{app_config.RERANCK_TOP_K}_{datetime.now().strftime('%Y-%m-%d_%H')}.csv"
    output_df.to_csv(output_path, index=False)
    logger.info(f"结果已保存到: {output_path}")


if __name__ == "__main__":
    # Alternate entry points kept for manual runs:
    # save_to_db()  # NOTE(review): not defined in this file — verify it exists elsewhere
    # test()        # validation-sample evaluation with metrics
    main()
