# -*- coding: utf-8 -*-
import os
import pandas as pd
import json
import logging
from openai import OpenAI
import shutil
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

# Logging: mirror every message to processing.log and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('processing.log'),
        logging.StreamHandler()
    ]
)

# OpenAI-compatible client for the DeepSeek relay endpoint.
# SECURITY: this API key was committed to source control and must be
# considered leaked -- rotate it, then supply the replacement via the
# OPENAI_API_KEY environment variable (the hard-coded value is only a
# backward-compatible fallback).
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY",
                           "sk-5uqqDiWSWxEXjjQkN28yazPbb7pWjH7ZosrJbCZpSj5ryaYK"),
    base_url=os.environ.get("OPENAI_BASE_URL", "https://tbnx.plus7.plus/v1")
)


def validate_and_fix_data(data):
    """Normalise a parsed LLM response into the expected record schema.

    The model sometimes returns a JSON array instead of an object, omits
    fields, or uses non-string values.  This coerces any parsed value into
    a dict that carries every required key.

    Args:
        data: Parsed JSON from the model (dict, list, or anything else).

    Returns:
        dict: Record with all required keys present; missing fields
        default to the empty string.
    """
    required_keys = ['comment', 'characterization', 'target', 'opinion', 'stance']

    # A list response is unwrapped to its first element.
    if isinstance(data, list):
        data = data[0] if data else {}

    # Anything that still is not a dict (e.g. a bare string or number)
    # cannot hold our fields -- replace it with an empty record.  The
    # original code raised TypeError here, which escaped the caller's
    # retry handler and aborted the retry loop.
    if not isinstance(data, dict):
        data = {}

    # Guarantee every required field exists.
    for key in required_keys:
        if key not in data:
            data[key] = ""

    # 'characterization' must be a string for the DataFrame write later.
    if not isinstance(data.get('characterization', ''), str):
        data['characterization'] = str(data['characterization'])

    return data


def generate_optimized_comment(index, row):
    """Ask the LLM to rewrite one comment and extract a stance triple.

    Retries up to 3 times when the response cannot be parsed as the
    expected JSON; on any other error, or after exhausting retries, falls
    back to the original comment with empty annotation fields so the
    batch pipeline never stalls on a single row.

    Args:
        index: Row index in the source DataFrame, threaded through so the
            caller can match results that complete out of order.
        row: Row as a dict; must contain 'topic' and 'content'.

    Returns:
        tuple: (index, validated result dict).
    """
    topic = row['topic']
    content = row['content']

    prompt = f"""
我现在需要构建一个开放域立场检测数据集，但是现在需要数据集要有挑战性表征，比如语义隐含性、多层级推理需求、文化嵌入性表达及复合型反讽结构等等。我现在通过网络爬取了一些数据，但是需要你帮我修改评论，尽量符合多层级推理需求、文化嵌入性表达这两种，因为目前这两类数据最缺。
构建立场检测三元组需满足以下规范：
【目标识别准则】
1. 提取评论直接指涉的最小语义单元实体
2. 保留字面指称，不做语义引申

【观点提取规范】
1. 剥离情感修饰词（如"哈哈"、"！"）
2. 表达对目标的具体立场观点是什么

【极性判定标准】
基于目标-观点对判断：
1. 支持：认可目标
2. 反对：否定目标  
3. 中立：仅陈述事实无倾向

事件主题：{topic}
原始评论：{content}

生成要求：
1. 修改后的评论需含一种挑战性表征，最好生成多层级推理或者文化嵌入性这两种类型，因为目前数据集这两者最缺。
   - 语义隐含性（如隐喻、借代）
   - 多层级推理（需因果链解析）  
   - 文化嵌入性
   - 复合反讽（表层语义与深层意图背离）
2. 输出JSON格式：
  "comment": "修改后评论(注意，这个评论需要满足中文社交媒体的表达风格，这个很重要。)",
  "characterization": "挑战类型", 
  "target": "精确语义单元",
  "opinion": "去情感化主张",
  "stance": "支持/反对/中立"
 """

    for attempt in range(3):
        try:
            response = client.chat.completions.create(
                model="deepseek-chat",
                messages=[
                    {"role": "system", "content": "你是擅长社会语义分析的专家"},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=500
            )

            raw_result = response.choices[0].message.content

            # Strip an optional ```json fenced block around the payload.
            if '```json' in raw_result:
                json_str = raw_result.split('```json')[1].split('```')[0].strip()
            else:
                json_str = raw_result.strip()

            # Debug trace of the raw model output (was a bare print()
            # that polluted stdout in the threaded run).
            logging.debug("index %s raw response: %s", index, raw_result)

            # Parse and normalise the model response.
            parsed_data = json.loads(json_str)
            validated_data = validate_and_fix_data(parsed_data)

            return (index, validated_data)

        # TypeError added: validate_and_fix_data can raise it on a
        # non-dict payload, and it should be retried like other
        # parsing failures rather than aborting the loop.
        except (json.JSONDecodeError, KeyError, IndexError, TypeError) as e:
            logging.warning(f"索引{index} 解析失败，第{attempt + 1}次重试，错误：{str(e)}")
            time.sleep(2)
        except Exception as e:
            logging.error(f"索引{index} API调用失败：{str(e)}")
            break

    # Fallback: keep the original comment and leave annotations empty.
    return (index, {
        "comment": content,
        "characterization": "无需修改",
        "target": "",
        "opinion": "",
        "stance": ""
    })


def safe_dataframe_update(df, index, result):
    """Write one result dict into the optimisation columns of row *index*.

    Missing keys in *result* become empty strings.  Failures are logged
    instead of raised so a single bad row cannot abort the whole run.

    Args:
        df: Target DataFrame.
        index: Row label to update.
        result: Dict with 'comment'/'target'/'stance'/'opinion'/
            'characterization' keys (any may be absent).

    Returns:
        The same DataFrame, for caller convenience.
    """
    column_map = {
        'comment_opt': 'comment',
        'target_opt': 'target',
        'stance_opt': 'stance',
        'opinion_opt': 'opinion',
        'characterization': 'characterization',
    }
    try:
        for column, key in column_map.items():
            df.at[index, column] = result.get(key, '')
    except Exception as e:
        logging.error(f"索引{index} 数据更新失败：{str(e)}")
    return df


def process_excel(input_file, output_file):
    """Run the LLM annotation over every row of *input_file* in parallel.

    Results are written back into new ``*_opt`` columns.  Progress is
    flushed to *output_file* every ``save_interval`` completed rows and a
    timestamped backup is written every 10 minutes.  On Ctrl-C the
    current progress is saved and the function returns.

    Args:
        input_file: Path of the source .xlsx file (rows need
            'topic'/'content' columns for generate_optimized_comment).
        output_file: Path where the annotated .xlsx is written.
    """
    df = pd.read_excel(input_file)

    # Make sure the output columns exist before any result is written.
    new_columns = ['comment_opt', 'target_opt', 'stance_opt', 'opinion_opt', 'characterization']
    for col in new_columns:
        if col not in df.columns:
            df[col] = pd.NA

    # Timestamped backups live in a "backups" dir next to the output file.
    backup_dir = os.path.join(os.path.dirname(output_file), "backups")
    os.makedirs(backup_dir, exist_ok=True)

    max_workers = 10
    save_interval = 100        # flush to disk every N completed rows
    last_backup = time.time()  # start the 10-minute clock now; the old
                               # value of 0 forced a backup on row one
    completed = 0              # futures finish out of order, so count
                               # completions instead of trusting the index

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(generate_optimized_comment, index, row.to_dict()): index
                   for index, row in df.iterrows()}

        try:
            for future in as_completed(futures):
                index, result = future.result()
                df = safe_dataframe_update(df, index, result)
                completed += 1

                # Periodic progress save.  The original used `index + 1`,
                # which misfires once results arrive out of order.
                if completed % save_interval == 0:
                    df.to_excel(output_file, index=False)
                    logging.info(f"█ 已保存进度：{completed}/{len(df)}")

                # Time-based backup (every 10 minutes).
                if time.time() - last_backup > 600:
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    backup_path = os.path.join(backup_dir, f"backup_{timestamp}.xlsx")
                    df.to_excel(backup_path, index=False)
                    last_backup = time.time()
                    logging.info(f"★ 定时备份成功：{backup_path}")

        except KeyboardInterrupt:
            # Save what we have, drop unstarted work, and return instead
            # of calling exit() from inside library code.
            logging.warning("用户中断，正在保存当前进度...")
            for pending in futures:
                pending.cancel()
            df.to_excel(output_file, index=False)
            logging.info("进度已保存，退出程序")
            return

    # Final save plus a stable "final_backup.xlsx" copy.
    df.to_excel(output_file, index=False)
    final_backup = os.path.join(backup_dir, "final_backup.xlsx")
    shutil.copy(output_file, final_backup)
    logging.info(f"处理完成！最终备份已保存至：{final_backup}")


if __name__ == "__main__":
    # Hard-coded input/output workbook paths for this batch run.
    input_file = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data2\test样本.xlsx"
    output_file = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data2\test样本_output4.xlsx"

    # Bail out early when the source workbook is missing.
    if not os.path.exists(input_file):
        logging.error(f"输入文件不存在：{input_file}")
        exit(1)

    # Run the pipeline and report total wall-clock time.
    started = time.time()
    process_excel(input_file, output_file)
    logging.info(f"总耗时：{time.time() - started:.2f}秒")