import pandas as pd
import matplotlib.pyplot as plt
import os
import json
import time
from openai import OpenAI
import ast
import csv
import numpy as np
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.preprocessing import MinMaxScaler
from scipy.spatial.distance import cdist

# Configure API credentials.
# NOTE(review): API key is hard-coded and committed to source — rotate this key
# and load it from the environment / a secrets manager instead of embedding it.
os.environ["OPENAI_API_KEY"] = "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf"
# Initialize the OpenAI-compatible client against the SiliconFlow endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # reads back the variable set just above
)

# Read a CSV file as plain text lines.
def extract_text_from_csv(csv_path):
    """Return every non-blank line of *csv_path* (header row included),
    stripped of surrounding whitespace and newline characters."""
    with open(csv_path, mode='r', encoding='utf-8') as handle:
        stripped = (raw.strip() for raw in handle)
        return [line for line in stripped if line]

# Embed each entry of *texts* through the embeddings API.
# NOTE(review): an earlier comment claimed rate limiting via time.sleep, but no
# sleep is implemented — add one if the API starts rejecting bursts.
def create_embeddings(texts, model="BAAI/bge-m3"):
    """Return one embedding vector per item in *texts*.

    If an item is itself a list of strings, only the embedding of its first
    element (response.data[0]) is kept — confirm this is intended for chunks.
    """
    print("开始转向量")
    print(texts)
    vectors = [
        client.embeddings.create(model=model, input=item).data[0].embedding
        for item in texts
    ]
    print("结束转向量")
    return vectors

# Generate an AI chat completion.
def generate_response(system_prompt, user_message, model):
    """Send one system+user exchange to the chat-completions API and return
    the raw response object (caller extracts .choices[0].message.content)."""
    print("开始生成")
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    result = client.chat.completions.create(
        model=model,
        temperature=0,       # 0 = deterministic, least random output
        max_tokens=16384,
        messages=conversation,
    )
    print("生成结束")
    return result

# Cosine similarity between two vectors.
def cosine_similarity(vec1, vec2):
    """Return dot(vec1, vec2) / (|vec1| * |vec2|).

    Undefined (division by zero) when either vector has zero norm.
    """
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return np.dot(vec1, vec2) / denom

def semantic_search(data_day_embedding, query_day_embedding, data_day, k=3):
    """Rank reference chunks by cosine similarity to the query embedding and
    return the *k* most similar entries of *data_day* (best first)."""
    scored = []
    for idx, emb in enumerate(data_day_embedding):
        # similarity between the query vector and this reference vector
        score = cosine_similarity(np.array(query_day_embedding), np.array(emb))
        print(score)
        scored.append((idx, score))
    # highest similarity first
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True)
    return [data_day[idx] for idx, _ in ranked[:k]]

def dtw_search(data_day_embedding, query_day_embedding, data_day, k=3):
    """Rank reference chunks by DTW distance to the query sequence and return
    the *k* closest entries of *data_day* (smallest distance first)."""
    # squeeze() drops any singleton leading axis so fastdtw iterates time steps
    query_np = np.array(query_day_embedding).squeeze()

    def _point_dist(x, y):
        # per-time-step distance: flatten both points and take Euclidean norm
        return euclidean(x.reshape(-1), y.reshape(-1))

    distances = []
    for idx, emb in enumerate(data_day_embedding):
        candidate = np.array(emb).squeeze()
        cost, _ = fastdtw(query_np, candidate, dist=_point_dist)
        distances.append((idx, cost))
    # smaller DTW cost means more similar
    distances.sort(key=lambda pair: pair[1])
    return [data_day[idx] for idx, _ in distances[:k]]

def euclidean_distance(vec1, vec2):
    """Euclidean (L2) distance between two vectors of equal shape."""
    diff = np.array(vec1) - np.array(vec2)
    return np.linalg.norm(diff)

# ===================================================================
# Dataset selection: ETTh1 (power transformer) or demand (building load).
# Swap the comment markers below to switch datasets.
# ===================================================================
data1 = "ETTh1资料"      # basename of the reference corpus CSV (complete history)
data1_1 = "ETTh1问题"    # basename of the query CSV containing missing values
dateL1 = "date"          # name of the timestamp column in these CSVs
data1_2 = "ETTh1填充后"  # output basename for the interpolated query CSV
# data1 = "demand资料"
# data1_1 = "demand问题"
# dateL1 = "timestamp"
# data1_2 = "demand填充后"
# ===================================================================
# 1. Fill the missing values via time-series interpolation, producing the
#    "interpolated query" used for retrieval later on.
# ===================================================================

df = pd.read_csv(f"data/{data1_1}.csv", parse_dates=[dateL1])
df[dateL1] = pd.to_datetime(df[dateL1])  # redundant (parse_dates already did this) but harmless
df.set_index(dateL1, inplace=True)
df_interpolated = df.interpolate(method="time")  # interpolate along the datetime index
df_interpolated.to_csv(f"data/{data1_2}.csv", index=True)
print(f"插值填充完成，已保存到 '{data1_2}.csv'")

# ===================================================================
# 2. Vectorize the reference data, the raw query, and the interpolated query.
# ===================================================================

# Model selection
tell_model = "Qwen/QwQ-32B"   # chat model used for the final prediction
vector_model = "BAAI/bge-m3"  # NOTE(review): never passed to create_embeddings,
                              # which relies on its own default of the same name
chunk_size = 24               # rows per chunk — presumably one day of hourly data
# Embed the reference data
data_path = f"data/{data1}.csv"
data_csv = extract_text_from_csv(data_path)
data_csv = data_csv[1:]  # drop the header row
data_day = [data_csv[i:i + chunk_size] for i in range(0, len(data_csv), chunk_size)]
# NOTE(review): each chunk is a *list* of lines; create_embeddings keeps only
# response.data[0].embedding, i.e. the vector of the chunk's first line —
# confirm whether a whole-chunk embedding was intended.
data_day_embedding = create_embeddings(data_day)
# Embed the raw query (still containing missing values)
query_path = f"data/{data1_1}.csv"
query_csv = extract_text_from_csv(query_path)
query_csv = query_csv[1:]  # drop the header row
query_day = [query_csv[i:i + chunk_size] for i in range(0, len(query_csv), chunk_size)]
query_day_embedding = create_embeddings(query_day)
# Embed the interpolated query
query2_path = f"data/{data1_2}.csv"
query2_csv = extract_text_from_csv(query2_path)
query2_csv = query2_csv[1:]
query2_day = [query2_csv[i:i + chunk_size] for i in range(0, len(query2_csv), chunk_size)]
query2_day_embedding = create_embeddings(query2_day)

# ===================================================================
# Volatility check: high variance -> DTW retrieval; low variance ->
# Min-Max normalization + Euclidean-distance retrieval.
# ===================================================================

std_values = df_interpolated.std()
print(std_values)
total_std = std_values.sum()
# Number of numeric columns (computed automatically)
num_variables = len(std_values)
# Mean of the per-column standard deviations
avg_std = total_std / num_variables
print("平均标准差：", avg_std)
# Branch on whether the average standard deviation exceeds 1
if avg_std > 1:
    print("数据波动较大，平均标准差 > 1,采用DTW检索方式")
    # Coerce embedding lists to NumPy arrays for dtw_search
    query2_day_embedding_np = np.array(query2_day_embedding)
    data_day_embedding_np = np.array(data_day_embedding)
    top_matches = dtw_search(data_day_embedding_np, query2_day_embedding_np, data_day, k=3)
    print(top_matches)
    # System prompt: answer strictly from the provided context, no explanation
    system_prompt = ("请严格根据提供的上下文回答问题，不提供任何额外解释，仅返回预测结果。")
    # User prompt. NOTE(review): the target timestamp "2016/7/25 3:00" is
    # hard-coded and only makes sense for the ETTh1 dataset — parameterize it
    # before enabling the demand dataset above.
    user_prompt = f"优先参考资料{top_matches}\n Question: {query_day}\n请预测缺失的 2016/7/25 3:00 的数据，并保持资料中的格式及小数点后14位的数据精度。无需提供解释，仅返回结果。"
    # Generate the AI answer
    print(system_prompt+user_prompt)
    ai_response = generate_response(system_prompt, user_prompt, tell_model)
    print(ai_response.choices[0].message.content)
else:
    print("数据较稳定，平均标准差 ≤ 1，采用欧式距离检索方式")
    # Min-Max normalize the reference data
    df = pd.read_csv(f"data/{data1}.csv", parse_dates=[dateL1])  # parse the timestamp column
    df.set_index(dateL1, inplace=True)  # use the timestamp as index
    numeric_cols = df.select_dtypes(include=["number"]).columns
    scaler = MinMaxScaler()
    df[numeric_cols] = scaler.fit_transform(df[numeric_cols])
    df.to_csv(f"data/{data1}标准化.csv")
    print(f"标准化完成，存入{data1}标准化.csv")
    # Min-Max normalize the interpolated query
    df_1 = pd.read_csv(f"data/{data1_2}.csv", parse_dates=[dateL1])  # parse the timestamp column
    df_1.set_index(dateL1, inplace=True)  # use the timestamp as index
    numeric_cols = df_1.select_dtypes(include=["number"]).columns
    scaler = MinMaxScaler()
    df_1[numeric_cols] = scaler.fit_transform(df_1[numeric_cols])
    df_1.to_csv(f"data/{data1_2}标准化.csv")
    print(f"标准化完成，存入{data1_2}标准化.csv")
    # Embed the normalized reference data
    data1_standardized_path = f"data/{data1}标准化.csv"
    data1_csv = extract_text_from_csv(data1_standardized_path)[1:]  # drop the header row
    chunk_size = 48  # NOTE(review): differs from the 24 used earlier — confirm intent
    data1_day = [data1_csv[i:i + chunk_size] for i in range(0, len(data1_csv), chunk_size)]
    print(f"数据切块完成，共 {len(data1_day)} 块")
    data1_day_embedding = create_embeddings(data1_day)
    # Embed the normalized query
    data1_2_standardized_path = f"data/{data1_2}标准化.csv"
    data1_2csv = extract_text_from_csv(data1_2_standardized_path)[1:]  # drop the header row
    # NOTE(review): unlike the reference data, the query is embedded line-by-line
    # (not chunked), so data1_2_day_embedding is a list of per-line vectors;
    # euclidean_distance below subtracts that whole list from a single chunk
    # embedding, which only works if the shapes happen to broadcast — verify.
    data1_2_day_embedding = create_embeddings(data1_2csv)
    eu_sort = []
    for i,data1_text in enumerate(data1_day_embedding):
        store = euclidean_distance(data1_text,data1_2_day_embedding)
        eu_sort.append((i,store))
    # Indices of the 3 smallest distances (most similar chunks)
    top3 = [x[0] for x in sorted(eu_sort, key=lambda x: x[1])[:3]]
    print(top3)
    best_matches = [data1_day[i] for i in top3]
    print("最匹配的 3 个块数据：", best_matches)
    # System prompt: answer strictly from the provided context, no explanation
    system_prompt = ("请严格根据提供的上下文回答问题，不提供任何额外解释，仅返回预测结果。")
    # Build the user prompt from the retrieved chunks plus the raw query
    user_prompt = f"优先参考资料{best_matches}\n Question: {query_day}\n请预测缺失的数据，并保持资料中的格式及小数点的数据精度。无需提供解释，仅返回结果。"
    # Generate the AI answer
    print(system_prompt + user_prompt)
    ai_response = generate_response(system_prompt, user_prompt, tell_model)
    print(ai_response.choices[0].message.content)