import hashlib
import json
import logging
import multiprocessing
import os
import pickle
import random
import re
import time
from datetime import datetime, timezone, timedelta
from functools import lru_cache
from itertools import combinations

import jieba
import numpy as np
import requests
import torch
from torch.utils.data import Dataset, DataLoader

def shuffleDict(d):
    """Return a new dict containing the same items as `d`, in random key order."""
    shuffled_keys = list(d)
    random.shuffle(shuffled_keys)
    result = {}
    for k in shuffled_keys:
        result[k] = d[k]
    return result

def fix_seed(seed):
    """Seed the python, numpy and torch (CPU + all CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)

def print_now(return_flag=0):
    """Format the current China Standard Time (UTC+8) as 'YYYY/MM/DD HH:MM:SS'.

    Returns the string when `return_flag` is truthy; otherwise prints it
    and returns None.
    """
    cst_zone = timezone(timedelta(hours=8), 'CST')
    stamp = datetime.now(cst_zone).strftime('%Y/%m/%d %H:%M:%S')
    if return_flag:
        return stamp
    print(stamp)

def file_to_string(filename):
    """Read a UTF-8 text file and return its stripped contents.

    Falls back to the default threshold-rule placeholder ("无阈值规则提供")
    when the file is missing, empty, or unreadable.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            content = file.read().strip()
            if not content:
                # BUG FIX: the log messages previously contained the literal
                # placeholder "(unknown)" instead of interpolating `filename`.
                logging.warning(f"文件 {filename} 内容为空，使用默认值")
                return "无阈值规则提供"
            return content
    except FileNotFoundError:
        logging.error(f"文件未找到: {filename}")
        return "无阈值规则提供"
    except Exception as e:
        logging.error(f"读取文件 {filename} 时出错: {str(e)}")
        return "无阈值规则提供"

@lru_cache(maxsize=1000)
def get_embedding_zetatechs(text, api_key, api_base):
    """Fetch (and cache) a text-embedding-ada-002 embedding for `text`.

    Two cache layers: an in-process lru_cache (keyed on all three args) and
    a pickle file on disk keyed by a SHA-256 digest of the text. Returns []
    after 3 failed API attempts.
    """
    cache_dir = "embedding_cache"
    os.makedirs(cache_dir, exist_ok=True)
    # BUG FIX: the original keyed the file on hash(text), which is randomized
    # per process (PYTHONHASHSEED), so the on-disk cache never survived a
    # restart. A stable content digest fixes that.
    digest = hashlib.sha256(text.encode("utf-8")).hexdigest()
    cache_file = os.path.join(cache_dir, f"{digest}.pkl")

    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as f:
            # NOTE(review): pickle is only safe here because we wrote the file
            # ourselves; never load cache files from untrusted sources.
            return pickle.load(f)

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "text-embedding-ada-002",
        "input": text
    }
    max_retries = 3
    for attempt in range(max_retries):
        try:
            response = requests.post(f"{api_base}/embeddings", json=payload, headers=headers, timeout=3600)
            response.raise_for_status()
            embedding = response.json()["data"][0]["embedding"]
            with open(cache_file, 'wb') as f:
                pickle.dump(embedding, f)
            return embedding
        except Exception as e:
            print(f"Attempt {attempt+1}: Error: {e}")
            if attempt < max_retries - 1:
                # exponential backoff; no pointless sleep after the last attempt
                time.sleep(2 ** attempt)
    print(f"Failed to get embedding for text: {text[:50]}...")
    return []

def decoder_for_zetatechs(args, input_messages, max_length):
    """Call the zetatechs chat-completions endpoint and return the reply text.

    Retries up to 3 times with exponential backoff; returns "" when every
    attempt fails or yields empty content.
    """
    api_base = "https://api.zetatechs.com/v1"
    # SECURITY(review): a live-looking API key is hardcoded as the fallback.
    # It should be rotated and the fallback removed; kept for now so callers
    # without ZETA_API_KEY set keep working.
    api_key = os.getenv("ZETA_API_KEY", "sk-p4Cown7yubjz4gETR5BGLJxBkWAQmBrCCidgt3RULy2giXFq")
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    payload = {
        "model": args.model,
        "messages": input_messages,
        "max_tokens": max_length,
        "temperature": args.temperature,
        "top_p": 1.0
    }

    max_retries = 3
    for attempt in range(max_retries):
        try:
            response = requests.post(f"{api_base}/chat/completions", json=payload, headers=headers, timeout=3600)
            response.raise_for_status()
            content = response.json().get("choices", [{}])[0].get("message", {}).get("content", "").strip()
            if content:
                return content
        except Exception as e:
            print(f"API Error (Attempt {attempt+1}): {e}")
        if attempt < max_retries - 1:
            # BUG FIX: the original slept even after the final attempt,
            # wasting up to 4 seconds before returning "".
            time.sleep(2 ** attempt)
    return ""
def value_function(query, coalition, user_information, path_history, get_embedding, api_key, api_base, w1=0.25, w2=0.45, w3=0.3):
    """Score `coalition` for `query` as w1*accuracy + w2*relevance + w3*timeliness, plus a size bonus.

    accuracy   - mean historical accuracy from path_history (0.7 default per player)
    relevance  - mean cosine similarity between query and player descriptions,
                 boosted 1.2x for monitoring/safety-related players
    timeliness - inverse of the mean historical response time (0.1 default)

    NOTE(review): `context_relevance` is computed below but is NOT folded into
    the returned value (it is only logged) — confirm whether that is intentional.
    """
    # Mean historical accuracy; 0.7 default for unseen (query, player) pairs.
    accuracy = 0.0
    count = 0
    for player in coalition:
        if query in path_history and player in path_history[query]:
            accuracy += path_history[query][player][0]
            count += 1
        else:
            accuracy += 0.7
            count += 1
    accuracy = accuracy / count if count > 0 else 0.0

    # Cosine similarity between the query embedding and each player's description.
    relevance = 0.0
    query_emb = get_embedding(query)
    if query_emb:
        similarities = []
        for player in coalition:
            player_desc = user_information[player]
            player_emb = get_embedding(player_desc)
            if player_emb:
                dot_product = sum(a * b for a, b in zip(query_emb, player_emb))
                norm_query = sum(a * a for a in query_emb) ** 0.5
                norm_player = sum(b * b for b in player_emb) ** 0.5
                similarity = dot_product / (norm_query * norm_player + 1e-10)
                # Boost players whose description mentions monitoring/safety keywords.
                if any(kw in player_desc.lower() for kw in ["数据", "趋势", "安全", "放炮", "设备", "传感器"]):
                    similarity *= 1.2
                similarities.append(similarity)
        relevance = sum(similarities) / max(1.0, len(similarities) * 0.9) if similarities else 0.0

    # Response efficiency: inverse of the mean historical response time.
    response_time = 0.0
    count = 0
    for player in coalition:
        if query in path_history and player in path_history[query]:
            response_time += path_history[query][player][1]  # path_history stores (accuracy, response_time)
            count += 1
        else:
            response_time += 0.1  # default response time for unseen pairs
            count += 1
    response_time = response_time / count if count > 0 else 0.1
    timeliness = 1.0 / (response_time + 1e-10)

    # Context relevance from "事件=..."/"状态=..." markers embedded in the query.
    context_relevance = 0.0
    # NOTE(review): "更换センサー" mixes Japanese katakana into a Chinese key —
    # presumably it was meant to be "更换传感器"; left unchanged to preserve behavior.
    event_scores = {"设备调整": 0.8, "放炮作业": 1.0, "更换センサー": 0.9, "设备更换": 0.9, "无": 0.3}
    status_scores = {"低电量": 0.9, "正常": 0.4, "异常": 0.8}
    event_match = re.findall(r'事件=([^，。]+)', query)
    status_match = re.findall(r'状态=([^，。]+)', query)
    event_score = event_scores.get(event_match[0] if event_match else "无", 0.3)
    status_score = status_scores.get(status_match[0] if status_match else "正常", 0.4)
    context_relevance = (event_score + status_score) / 2.0
    if event_match:
        event = event_match[0].lower()
        for player in coalition:
            player_desc = user_information[player].lower()
            if event in player_desc or any(kw in player_desc for kw in ["放炮", "设备", "传感器", "趋势", "安全"]):
                context_relevance += 0.05

    value = w1 * accuracy + w2 * relevance + w3 * timeliness
    size_bonus = 0.0
    # BUG FIX: the original pattern r'历史=$$ (.*?) $$' could never match —
    # `$` is an end-of-string anchor, not a literal bracket. Restored to
    # match a "历史=[...]" segment in the query.
    history_match = re.search(r'历史=\[(.*?)\]', query)
    if len(coalition) >= 3 and (history_match or event_match and event_match[0] in ["放炮作业", "设备调整", "设备更换"]):
        size_bonus = 0.03 * (len(coalition) - 2)
    elif len(coalition) == 2 and not history_match and event_match and event_match[0] in ["无", "设备更换"]:
        size_bonus = 0.04
    value += size_bonus
    logging.debug(f"联盟 {coalition}: accuracy={accuracy}, relevance={relevance}, timeliness={timeliness}, context_relevance={context_relevance}, size_bonus={size_bonus}, value={value}")
    return value

def shapley_value(player, all_players, query, user_information, path_history, get_embedding, api_key, api_base, n_samples=10):
    """Monte Carlo estimate of `player`'s Shapley value for `query`.

    BUG FIX: the original always appended `player` LAST in every sampled
    ordering, so it estimated v(N) - v(N \\ {player}) rather than a Shapley
    value. Monte Carlo Shapley sampling requires averaging the player's
    marginal contribution over uniformly random permutations of ALL players,
    so we shuffle the full list and stop once the player has been added.
    """
    shapley = 0.0
    permutation = list(all_players)

    # Task-dependent weights forwarded to value_function.
    task_type = "原因归因" if "原因" in query else "状态识别" if "是否超标" in query else "策略推荐"
    w1 = 0.5 if task_type == "状态识别" else 0.3
    w2 = 0.4 if task_type == "原因归因" else 0.3
    w3 = 0.3 if task_type == "策略推荐" else 0.2

    for _ in range(n_samples):
        random.shuffle(permutation)
        coalition = []
        v_s = value_function(query, coalition, user_information, path_history, get_embedding, api_key, api_base, w1, w2, w3)
        logging.debug(f"采样 coalition={coalition}, v_s={v_s}")

        for p in permutation:
            coalition.append(p)
            v_s_next = value_function(query, coalition, user_information, path_history, get_embedding, api_key, api_base, w1, w2, w3)
            logging.debug(f"采样 coalition={coalition}, v_s_next={v_s_next}")
            if p == player:
                shapley += (v_s_next - v_s) / n_samples
                break  # later additions cannot change this player's marginal contribution
            v_s = v_s_next

    logging.info(f"玩家 {player} 的 Shapley 值: {shapley}")
    return shapley

class Decoder:
    """Thin object wrapper around `decoder_for_zetatechs` for pipeline use."""

    def __init__(self):
        pass

    def decode(self, args, input, max_length):
        """Delegate to decoder_for_zetatechs and return the model reply text."""
        return decoder_for_zetatechs(args, input, max_length)

def data_reader(args):
    """Load the coal_mine_sensors test split from JSONL.

    Returns (questions, answers, avg_question_word_count). Lines that are
    blank, malformed JSON, missing required keys, or have a non-string
    answer are skipped with a diagnostic print.
    """
    questions = []
    answers = []

    if args.dataset == "coal_mine_sensors":
        file_path = os.path.join(args.data_path, args.dataset, "test.jsonl")
        with open(file_path, encoding='utf-8') as f:
            for line_num, line in enumerate(f, 1):
                line = line.strip()
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except json.JSONDecodeError as e:
                    print(f"JSON 解析错误在第 {line_num} 行: {e}")
                    continue
                if not isinstance(data, dict) or "question" not in data or "answer" not in data:
                    print(f"无效数据格式在第 {line_num} 行: {line}")
                    continue
                answer = data["answer"]
                if not isinstance(answer, str):
                    print(f"answer 不是字符串格式在第 {line_num} 行: {answer}")
                    continue
                questions.append(data["question"])
                answers.append(answer)

    # BUG FIX: the original divided by len(questions) unconditionally, which
    # raised ZeroDivisionError for any other dataset name or an empty file.
    avg_words = (sum(len(q.split()) for q in questions) / len(questions)) if questions else 0.0
    return questions, answers, avg_words

class MyDataset(Dataset):
    """In-memory QA dataset; each item is a {"question", "answer"} dict."""

    def __init__(self, questions, answers):
        self.questions = questions
        self.answers = answers

    def __len__(self):
        return len(self.questions)

    def __getitem__(self, idx):
        return {"question": self.questions[idx], "answer": self.answers[idx]}

def setup_data_loader(args):
    """Seed all RNGs, load the dataset, and wrap it in a single-example DataLoader."""
    fix_seed(args.random_seed)
    worker_count = max(1, multiprocessing.cpu_count() // 2)
    questions, answers, _avg_words = data_reader(args)
    return DataLoader(
        MyDataset(questions, answers),
        batch_size=1,
        shuffle=False,
        num_workers=worker_count,
    )

def normalize_text(text):
    """Lowercase `text` after collapsing whitespace, normalizing colons and dropping punctuation."""
    collapsed = re.sub(r'\s+', ' ', text.strip())
    colon_fixed = re.sub(r'[：:]\s*', ':', collapsed)
    depunctuated = re.sub(r'[，,。！!？?]', '', colon_fixed)
    return depunctuated.lower()

def is_complete_sentence(text):
    """Heuristic: does `text` look like a finished Chinese sentence?

    Rejects empty/short strings, dangling separators, ellipses, and text
    with no CJK characters or fewer than 5 characters total.
    """
    if not text:
        return False
    if len(text.strip()) < 3:
        return False
    dangling_tails = ('**', '*', '：', ':', '，', ',')
    if '...' in text or text.endswith(dangling_tails):
        return False
    has_cjk = any('\u4e00' <= ch <= '\u9fff' for ch in text)
    return has_cjk and len(text) >= 5

# NOTE(review): redundant imports — both `re` and `logging` are already
# imported at the top of this file; safe to delete once confirmed.
import re
import logging

def answer_cleansing(args, response, dataset):
    """Extract and normalize the five standard answer fields from `response`.

    NOTE(review): this definition is shadowed by the later `answer_cleansing`
    defined at the bottom of this file, so it is dead code at import time —
    confirm which implementation is the intended one.
    Returns {} for an empty response; otherwise a dict with keys
    is_exceeded / alarm_type / alarm_level / reason / suggestion.
    """
    response = response.strip() if response else ""
    if not response:
        return {}

    # Canonical field values.
    valid_alarm_types = ["超限报警", "标校", "传感器断线", "基站不通", "未知"]
    valid_alarm_levels = ["一级", "二级", "三级", "未知"]
    valid_is_exceeded = ["是", "否", "未知"]

    # Field-extraction patterns.
    # BUG FIX: the originals used character classes like [是|否|未知]+, which
    # match arbitrary runs of those characters (including '|'), not the
    # intended alternatives — rewritten as proper alternation groups.
    patterns = {
        "is_exceeded": r"是否超标：\s*(是|否|未知)",
        "alarm_type": r"报警类型：\s*([^\n，。]+)",
        "alarm_level": r"报警级别：\s*(一级|二级|三级|未知)",
        "reason": r"原因：\s*([^\n，。]+)",
        "suggestion": r"建议：\s*([^\n，。]+)"
    }

    result = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, response, re.UNICODE)
        if match:
            value = match.group(1).strip()
            # Strip leading filler words ("为", "是：", ...).
            value = re.sub(r"^(为|是：|为：|是\s+|：)", "", value).strip()
            # Normalize to the canonical value sets.
            if key == "alarm_type" and value not in valid_alarm_types:
                value = "超限报警" if "超限" in value else "未知"
            elif key == "alarm_level" and value not in valid_alarm_levels:
                value = "未知"
            elif key == "is_exceeded" and value not in valid_is_exceeded:
                value = "未知"
            result[key] = value
        else:
            result[key] = "未知"

    # Fall back to defaults when reason/suggestion are missing.
    if not result.get("reason") or result["reason"] == "未知":
        result["reason"] = "未提供具体原因"
    if not result.get("suggestion") or result["suggestion"] == "未知":
        result["suggestion"] = "建议继续监测气体浓度并检查传感器状态"

    logging.debug(f"清洗后的答案: {result}")
    return result
def answer_cleansing(args, pred, dataset, sub_task="generic_task"):
    """Parse a free-form model reply into the five standard answer fields.

    Returns a dict with keys is_exceeded / alarm_type / alarm_level /
    reason / suggestion; fields that cannot be extracted keep their
    defaults ("未知" or "").
    """
    defaults = {
        "is_exceeded": "未知",
        "alarm_type": "未知",
        "alarm_level": "未知",
        "reason": "",
        "suggestion": ""
    }
    if not pred or not isinstance(pred, str):
        logging.warning(f"无效的预测值: {pred}")
        return defaults

    logging.debug(f"原始响应: {repr(pred)}")

    # Pre-clean: strip markdown bold, brackets/quotes, collapse whitespace.
    normalized = re.sub(r'\*\*([^*]+)\*\*', r'\1', pred)
    normalized = re.sub(r'[\[\]“”"]', '', normalized)
    normalized = re.sub(r'\s+', ' ', normalized).strip()

    cleaned = dict(defaults)

    # Prefer the concluding section ("综上所述 ...") when one exists.
    section = re.search(r'(综上所述.*?)(是否超标|输出结果|$)', normalized)
    tail = normalized if section is None else normalized[section.start():]

    # Tolerant field-extraction patterns.
    field_patterns = (
        ("is_exceeded", r'是否超标\s*[:：=]?\s*(是|否|未知)'),
        ("alarm_type", r'报警类型\s*[:：=]?\s*([\u4e00-\u9fa5]+)'),
        ("alarm_level", r'报警级别\s*[:：=]?\s*(一级|二级|三级|未知)'),
        ("reason", r'原因\s*[:：=]?\s*(.*?)(建议|$)'),
        ("suggestion", r'建议\s*[:：=]?\s*(.*)'),
    )
    for field, pattern in field_patterns:
        hit = re.search(pattern, tail)
        if hit is not None:
            cleaned[field] = hit.group(1).strip()

    logging.debug(f"[简洁清洗] 清理后结果: {cleaned}")
    return cleaned
