import json
import os
import random
import traceback

import chardet
import openai
import pandas as pd
import requests
import plotly.express as px
import streamlit as st
import logging
import sys
import time

from config import (JUDGEMENT_PROMPT, OPENAI_MODELS, DEEPSEEK_MODELS, TAGS_SEPARATORS, DEFAULT_TAGS_SEPARATOR, FIELD_TAGS, FIELD_FINAL_SCORE)


def request_dify(url, api_key, user, question):
    """
    Send a blocking chat request to the Dify API and return the reply.

    :param url: Dify chat-messages endpoint URL
    :param api_key: Dify API key used as a Bearer token
    :param user: caller identifier forwarded to Dify
    :param question: the question to ask
    :return: the "answer" field on success (or "message" as a fallback, or
             the raw JSON payload when neither exists); on failure, an
             error-description string
    """
    logger.info(f"开始向Dify API发送请求 - URL: {url}, 用户: {user}")

    request_headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }
    payload = {
        "inputs": {},
        "query": question,
        "response_mode": "blocking",
        "conversation_id": "",
        "user": user
    }

    try:
        resp = requests.post(url, headers=request_headers, json=payload, timeout=30)
        resp.raise_for_status()

        parsed = resp.json()
        logger.info("Dify API请求成功")

        # Prefer the regular answer field, then the message field, and as a
        # last resort hand back the whole payload so the caller sees something.
        for field in ('answer', 'message'):
            if field in parsed:
                return parsed[field]
        return parsed

    except requests.exceptions.RequestException as e:
        error_msg = f"API请求错误: {str(e)}\n {traceback.format_exc()}"
        logger.error(error_msg)
        return error_msg
    except json.JSONDecodeError as e:
        error_msg = f"无法解析响应: {resp.text}, error: {e}, {traceback.format_exc()}"
        logger.error(error_msg)
        return error_msg

# Example usage:
# q = "AC200Max+Can I mix one B230 and one B300 on my AC200Max at the same time?"
# res = request_dify("http://192.168.41.62/v1/chat-messages", "app-mqCCfa0WMgb2aq94hf5Y9XxF", "abc-123", q )
# print(res)
# res = "Yes, you can connect up to 2 B230, B300, and B300K batteries to the AC200MAX, allowing you to mix-and-match by connecting up to two different types of batteries simultaneously."


def judge_by_ai(model, data, custom_prompt=None):
    """
    Score a QA record with an LLM judge through an OpenAI-compatible API.

    :param model: model name; must appear in OPENAI_MODELS or DEEPSEEK_MODELS,
        which selects the API key / base URL environment variables to use
    :param data: dict holding the question, expected answer, actual answer and
        per-aspect scores; serialized to JSON and sent as the user message
    :param custom_prompt: optional judgment prompt; when omitted, the default
        JUDGEMENT_PROMPT from config is used
    :return: dict with at least "score" and "comment"; on any failure "score"
        is 0 and "raw_response" carries the unparsed output / error text
    """
    logger.info(f"开始使用AI模型进行评判 - 模型: {model}")

    user_message = json.dumps(data, ensure_ascii=False, indent=2)

    # A custom prompt may not state the output contract, so append it here;
    # the default JUDGEMENT_PROMPT is assumed to carry its own format section.
    json_prompt = """
    The return format should be: JSON. 
    {
        "score": 8,
        "comment": "The reason of 8 points...."
    }
    """
    prompt_to_use = custom_prompt + json_prompt if custom_prompt else JUDGEMENT_PROMPT
    logger.debug(f"使用{'自定义' if custom_prompt else '默认'}提示词")

    if model in OPENAI_MODELS:
        api_key = os.getenv("OPENAI_API_KEY")
        api_url = os.getenv("OPENAI_API_URL")
        logger.debug("使用OpenAI模型配置")
    elif model in DEEPSEEK_MODELS:
        api_key = os.getenv("DEEPSEEK_API_KEY")
        api_url = os.getenv("DEEPSEEK_API_URL")
        logger.debug("使用Deepseek模型配置")
    else:
        # Bug fix: an unknown model previously fell through and crashed with
        # an UnboundLocalError on api_key; report it with the same error-dict
        # shape as the other failure paths instead.
        error_msg = f"AI评判过程中发生错误: 未知模型 {model}"
        logger.error(error_msg)
        return {
            "score": 0,
            "comment": error_msg,
            "raw_response": ""
        }

    client = openai.OpenAI(api_key=api_key, base_url=api_url)

    result = None  # keep defined for the JSONDecodeError handler below
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": prompt_to_use},
                {"role": "user", "content": user_message}
            ],
            temperature=0.2,  # low temperature for more reproducible judging
        )

        result = response.choices[0].message.content
        # Strip Markdown code fences some models wrap around JSON output.
        result = result.replace('```json', '').replace('```', '')

        json_result = json.loads(result)
        logger.info(f"AI评判完成，得分: {json_result.get('score', 'N/A')}")
        return json_result

    except json.JSONDecodeError as e:
        error_msg = f"评估失败，无法解析模型响应: {str(e)} {traceback.format_exc()}"
        logger.error(error_msg)
        return {
            "score": 0,
            "comment": error_msg,
            "raw_response": result
        }
    except Exception as e:
        error_msg = f"AI评判过程中发生错误: {str(e)} {traceback.format_exc()}"
        logger.error(error_msg)
        return {
            "score": 0,
            "comment": error_msg,
            "raw_response": str(e)
        }

# load_dotenv()
# data = {'Problem Question': 'AC200MAX+not detecting the B230', 'Expected Answer': 'Ensure both ends of all connection cables are securely locked in place.\nPower on the battery packs manually to connect with the AC200MAX.\nTry using different expansion ports for the AC200MAX and the B230, as each unit has two ports.\nTry a new expansion cable.\nIf your issue persists, please contact BLUETTI Customer Service.', 'Problem Actual Response': 'The product model "AC200MAX+B230" is included in the mentioned product list. Here are some steps you can take to resolve the issue of the AC200MAX not detecting the B230:\n\n1. Ensure both ends of all connection cables are securely locked in place.\n   ![AC200M+2 B230 connection](https://oss.bluettipower.com/blu-marketing-center/192706a981e9d661413bf29fb5a.png)\n   ![P090D locked](https://oss.bluettipower.com/blu-marketing-center/1926ffd44489d661413bf29fad3.png)\n2. Power on the battery packs manually to connect with the AC200MAX.\n3. Try using different expansion ports for the AC200MAX and the B230, as each unit has two ports.\n4. Try a new expansion cable.\n5. If your issue persists, please contact BLUETTI Customer Service.', 'Accuracy': 0.7, 'Relevance': 0.7, 'Coherence': 0.3, 'Toxicity': 0.8, 'Tags': []}
# res = judge_by_ai("deepseek-ai/DeepSeek-V3", data)
# print(res)
# print(openai.Model.list())

def detect_encoding(file):
    """
    Detect the character encoding of a file.

    :param file: a filesystem path, or a binary file-like object (e.g. a
        Streamlit UploadedFile). Bug fix: the original always called
        ``open(file, 'rb')``, which raises for file-like objects, so
        detection silently failed for uploaded files and returned None.
    :return: the detected encoding name, or None on failure
    """
    try:
        if hasattr(file, 'read'):
            # File-like object: consume the raw bytes, then rewind so later
            # readers (e.g. pd.read_csv) start from the beginning again.
            raw_data = file.read()
            file.seek(0)
        else:
            with open(file, 'rb') as f:
                raw_data = f.read()
        result = chardet.detect(raw_data)
        logger.debug(f"检测到文件编码: {result['encoding']}")
        return result['encoding']
    except Exception as e:
        logger.error(f"检测文件编码时出错: {str(e)} {traceback.format_exc()}")
        return None

def read_data_file(file):
    """
    Read an uploaded data file into a DataFrame (CSV, Excel or JSON).

    :param file: a file-like object with a ``name`` attribute (e.g. a
        Streamlit UploadedFile); the extension selects the parser
    :return: pandas DataFrame with object columns re-encoded as clean UTF-8
    :raises Exception: when the format is unsupported or every read fails
    """
    try:
        file_extension = os.path.splitext(file.name)[1].lower()
        logger.info(f"开始读取文件: {file.name} (类型: {file_extension})")

        if file_extension == '.csv':
            detected_encoding = detect_encoding(file)
            logger.info(f"检测到文件 {file.name} 的编码为: {detected_encoding}")

            if not detected_encoding:
                encodings = ['utf-8', 'utf-8-sig', 'utf-16', 'utf-16le', 'utf-16be', 
                           'latin1', 'cp1252', 'iso-8859-1', 'windows-1256', 'gbk', 'gb2312', 'gb18030']
                logger.debug("使用默认编码列表尝试读取文件")
            else:
                encodings = [detected_encoding]

            for encoding in encodings:
                try:
                    # Bug fix: rewind before every attempt. A failed
                    # pd.read_csv (and the chardet probe above) can leave the
                    # stream mid-file, so without seek(0) the next attempt
                    # would read garbage or an empty stream.
                    if hasattr(file, 'seek'):
                        file.seek(0)
                    df = pd.read_csv(file, encoding=encoding)
                    # Force object columns through UTF-8 to drop undecodable bytes.
                    for col in df.select_dtypes(include=['object']).columns:
                        df[col] = df[col].astype(str).apply(lambda x: x.encode('utf-8', errors='ignore').decode('utf-8'))
                    logger.info(f"成功读取CSV文件: {file.name} (使用 {encoding} 编码)")
                    return df
                except UnicodeDecodeError:
                    logger.debug(f"使用编码 {encoding} 读取失败，尝试下一个编码")
                    continue
                except Exception as e:
                    logger.error(f"使用编码 {encoding} 读取文件时出错: {str(e)}, {traceback.format_exc()}")
                    continue

            error_msg = "无法读取文件，请确保文件使用正确的编码格式"
            logger.error(error_msg)
            raise Exception(error_msg)

        elif file_extension in ['.xlsx', '.xls']:
            try:
                df = pd.read_excel(file)
                for col in df.select_dtypes(include=['object']).columns:
                    df[col] = df[col].astype(str).apply(lambda x: x.encode('utf-8', errors='ignore').decode('utf-8'))
                logger.info(f"成功读取Excel文件: {file.name}")
                return df
            except Exception as e:
                error_msg = f"读取Excel文件时出错: {str(e)}, {traceback.format_exc()}"
                logger.error(error_msg)
                raise Exception(error_msg)

        elif file_extension == '.json':
            try:
                df = pd.read_json(file)
                logger.info(f"成功读取JSON文件: {file.name}")
                return df
            except Exception as e:
                error_msg = f"读取JSON文件时出错: {str(e)}, {traceback.format_exc()}"
                logger.error(error_msg)
                raise Exception(error_msg)

        else:
            error_msg = f"不支持的文件格式: {file_extension}"
            logger.error(error_msg)
            raise Exception(error_msg)

    except Exception as e:
        error_msg = f"读取文件时出错: {str(e)}, {traceback.format_exc()}"
        logger.error(error_msg)
        raise Exception(error_msg)

def split_tags(tags_str):
    """
    Split a raw tags string into a list of tags.

    The first separator from TAGS_SEPARATORS found in the string wins; when
    none is present, the whole stripped string becomes a single tag.

    :param tags_str: raw tags string (may be empty or None)
    :return: list of stripped, non-empty tags; [] for empty input
    """
    if not tags_str:
        return []

    matched_sep = next((sep for sep in TAGS_SEPARATORS if sep in tags_str), None)
    if matched_sep is not None:
        tags = [part.strip() for part in tags_str.split(matched_sep) if part.strip()]
        logger.debug(f"使用分隔符 '{matched_sep}' 分割tags: {tags}")
        return tags

    single = tags_str.strip()
    if single:
        logger.debug(f"未找到分隔符，使用整个字符串作为单个tag: {single}")
        return [single]
    return []

def join_tags(tags_list):
    """
    Join a list of tags into one string using the default separator.

    :param tags_list: list of tag strings (may be empty or None)
    :return: joined string, or "" for empty input
    """
    if not tags_list:
        return ""
    joined = DEFAULT_TAGS_SEPARATOR.join(tags_list)
    logger.debug(f"合并tags列表: {tags_list} -> {joined}")
    return joined

def filter_results_by_tags(results, selected_tags):
    """
    Keep only the results that carry at least one of the selected tags.

    :param results: list of test-result dicts
    :param selected_tags: tags to filter by; an empty selection disables
        filtering and returns the input unchanged
    :return: filtered list of results
    """
    if not selected_tags:
        logger.debug("未选择tags，返回所有结果")
        return results

    logger.info(f"开始按tags过滤结果 - 选中的tags: {selected_tags}")
    wanted = set(selected_tags)
    filtered_results = []
    for entry in results:
        # Tags may be stored either as a raw string or as a list.
        entry_tags = entry.get(FIELD_TAGS, [])
        if isinstance(entry_tags, str):
            entry_tags = split_tags(entry_tags)
        if wanted.intersection(entry_tags):
            filtered_results.append(entry)

    logger.info(f"过滤完成 - 原始结果数: {len(results)}, 过滤后结果数: {len(filtered_results)}")
    return filtered_results

def generate_analysis_report(results, selected_tags=None):
    """
    Build the analysis report data (score distribution, total, average, tags).

    :param results: list of test-result dicts; each is expected to contain
        the FIELD_FINAL_SCORE field and optionally FIELD_TAGS
    :param selected_tags: optional tags used to pre-filter the results
    :return: dict with score_distribution, total_count, average_score and
             tag_distribution (all empty/zero when nothing matches)
    """
    logger.info("开始生成分析报告")

    if selected_tags:
        logger.info(f"使用选定的tags进行过滤: {selected_tags}")
        results = filter_results_by_tags(results, selected_tags)

    if not results:
        logger.warning("没有可分析的结果")
        return {
            "score_distribution": [],
            "total_count": 0,
            "average_score": 0,
            "tag_distribution": {}
        }

    frame = pd.DataFrame(results)
    score_series = frame[FIELD_FINAL_SCORE]

    # Tags may be stored either as a raw string or as a list per entry.
    collected_tags = []
    for entry in results:
        entry_tags = entry.get(FIELD_TAGS, [])
        if isinstance(entry_tags, str):
            entry_tags = split_tags(entry_tags)
        collected_tags.extend(entry_tags)

    report = {
        "score_distribution": score_series.value_counts().sort_index().to_dict(),
        "total_count": len(results),
        "average_score": score_series.mean(),
        "tag_distribution": pd.Series(collected_tags).value_counts().to_dict(),
    }

    logger.info(f"分析报告生成完成 - 总数: {report['total_count']}, 平均分: {report['average_score']:.2f}")
    return report

def create_score_pie_chart(score_distribution):
    """
    Build a pie chart of the score distribution.

    :param score_distribution: mapping of score -> occurrence count
    :return: plotly figure object
    """
    logger.debug("创建分数分布饼图")
    chart_data = pd.DataFrame({
        'Score': list(score_distribution.keys()),
        'Count': list(score_distribution.values()),
    })
    return px.pie(
        chart_data,
        values='Count',
        names='Score',
        title='分数分布',
        color_discrete_sequence=px.colors.sequential.RdBu,
    )

def create_tag_bar_chart(tag_distribution):
    """
    Build a bar chart of the tag distribution.

    :param tag_distribution: mapping of tag -> occurrence count
    :return: plotly figure object
    """
    logger.debug("创建标签分布条形图")
    chart_data = pd.DataFrame({
        'Tag': list(tag_distribution.keys()),
        'Count': list(tag_distribution.values()),
    })
    return px.bar(
        chart_data,
        x='Tag',
        y='Count',
        title='标签分布',
        color='Count',
        color_continuous_scale=px.colors.sequential.RdBu,
    )

def show_analysis_report(results, selected_tags=None):
    """
    Render the analysis report (metrics + charts) in the Streamlit UI.

    :param results: list of test-result dicts
    :param selected_tags: tags pre-selected in the multiselect widget
    :return: the tags chosen by the user, or None when there are no results
    """
    logger.info("开始显示分析报告")

    if not results:
        logger.warning("没有测试结果可供分析")
        st.warning("暂无测试结果，请先进行测试并评分。")
        return

    # Collect every tag present in the results to populate the tag picker.
    available_tags = set()
    for entry in results:
        entry_tags = entry.get(FIELD_TAGS, [])
        if isinstance(entry_tags, str):
            entry_tags = split_tags(entry_tags)
        available_tags.update(entry_tags)

    logger.debug(f"可选的tags: {list(available_tags)}")
    selected_tags = st.multiselect(
        "选择要分析的标签",
        options=list(available_tags),
        default=selected_tags if selected_tags else []
    )

    analysis_data = generate_analysis_report(results, selected_tags)

    left_col, right_col = st.columns(2)
    with left_col:
        st.metric("总测试数量", analysis_data["total_count"])
    with right_col:
        st.metric("平均分数", f"{analysis_data['average_score']:.2f}")

    if analysis_data["score_distribution"]:
        logger.debug("显示分数分布饼图")
        st.plotly_chart(create_score_pie_chart(analysis_data["score_distribution"]))

    if analysis_data["tag_distribution"]:
        logger.debug("显示标签分布条形图")
        st.plotly_chart(create_tag_bar_chart(analysis_data["tag_distribution"]))

    return selected_tags


# Log files live in a "logs" directory next to this module.
log_dir = os.path.join(os.path.dirname(__file__), "logs")

def _reset_logger(log):
    """
    Detach and close any existing handlers on *log*, then attach a fresh
    stdout handler and a daily-rotated-by-name file handler, both sharing
    the same format.

    :param log: the logging.Logger to (re)configure
    """
    # Bug fix: iterate over a copy. Removing handlers while iterating the
    # live log.handlers list skips every other handler, so some handlers
    # were never close()d (leaking open file handles); handlers.clear()
    # only detaches them without closing.
    for handler in list(log.handlers):
        handler.close()
        log.removeHandler(handler)
    log.handlers.clear()
    log.propagate = False

    formatter = logging.Formatter(
        "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    console_handle = logging.StreamHandler(sys.stdout)
    console_handle.setFormatter(formatter)

    # Ensure the log directory exists before opening the file handler.
    os.makedirs(log_dir, exist_ok=True)

    log_filename = os.path.join(log_dir, time.strftime("%Y_%m_%d", time.localtime()) + '.log')
    file_handle = logging.FileHandler(log_filename, encoding="utf-8")
    file_handle.setFormatter(formatter)

    log.addHandler(file_handle)
    log.addHandler(console_handle)

def _get_logger():
    """
    Create (or refresh) the shared application logger.

    :return: a DEBUG-level logger named "log" with console and file handlers
    """
    app_log = logging.getLogger("log")
    _reset_logger(app_log)
    app_log.setLevel(logging.DEBUG)
    return app_log


# Shared module-level logger used by every function in this file.
logger = _get_logger()