import os
# Route Hugging Face Hub downloads through the hf-mirror.com mirror; set
# before transformers/peft are imported so model downloads use the mirror.
os.environ['HF_ENDPOINT']='https://hf-mirror.com'
import os
import re
import sqlite3
import pandas as pd
import torch
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
import time

# --- 1. Global configuration ---
CONFIG = {
    # Root folders to scan for report .txt files
    "root_folders": [r"C:\Users\Administrator\Projects\lmy\data\csr_reports", r"C:\Users\Administrator\Projects\lmy\data\mda_reports", r"C:\Users\Administrator\Projects\lmy\data\annual_reports"],
    # Path to the 3-class filter model adapter (previously trained)
    "filter_model_path": "./results_sampled/final_model",
    # Path to the binary classifier adapter (trained in step 1)
    "binary_model_path": "./binary_climate_model/final_model",
    # Base pretrained model name
    "base_model_name": "hfl/chinese-roberta-wwm-ext",
    # SQLite database holding per-sentence prediction records
    "db_path": "step5.db",
    # Batch size for model inference
    "batch_size": 256
} 


# --- 2. Text-processing and filename-parsing helpers ---

def parse_filename(filepath):
    """Parse (report_id, company, year) from a report file path.

    Supports two filename layouts (separator '-' or '_'):
      1. <id>-<company>-<year>
      2. <id>-<year>-<company>

    Args:
        filepath: path to the report file; only the basename is used.

    Returns:
        Tuple of stripped strings (report_id, company, year), or
        ("unknown", <name without extension>, "unknown") when neither
        layout matches.
    """
    filename = os.path.basename(filepath)
    # Strip the extension robustly: the previous slice [:-4] assumed a
    # 4-character suffix and corrupted names without one.
    name_part, _ext = os.path.splitext(filename)

    # Layout 1: <id>-<company>-<year>. The greedy '.*' makes the year bind
    # to the LAST 4-digit group in the name.
    match = re.search(r'(\d+)[-_](.*)[-_](\d{4})', name_part)
    if match:
        report_id, company, year = match.groups()
        return report_id.strip(), company.strip(), year.strip()

    # Layout 2: <id>-<year>-<company>.
    match = re.search(r'(\d+)[-_](\d{4})[-_](.*)', name_part)
    if match:
        report_id, year, company = match.groups()
        return report_id.strip(), company.strip(), year.strip()

    # Fallback when neither layout matches.
    return "unknown", name_part, "unknown"


def clean_and_split_text(text_content):
    """Filter, clean, and split raw report text into sentences.

    Keeps only lines containing at least 4 Chinese characters, removes all
    whitespace, then splits on Chinese end-of-sentence punctuation, keeping
    each delimiter attached to the sentence it terminates.
    NOTE(review): a trailing fragment with no terminal punctuation is
    silently discarded — this matches the original behavior.

    Returns:
        (sentences, count): list of sentence strings and its length.
    """
    # Keep lines with a meaningful amount of Chinese text.
    kept_lines = [
        line for line in text_content.splitlines()
        if len(re.findall(r'[\u4e00-\u9fa5]', line)) >= 4
    ]

    # Merge and strip every kind of whitespace.
    merged = "".join(kept_lines)
    for ws in ('\n', '\r', ' ', '\t'):
        merged = merged.replace(ws, '')

    if not merged:
        return [], 0

    # Split on sentence-ending punctuation; capturing group keeps delimiters.
    parts = re.split(r'([。！？；])', merged)

    # Re-attach each delimiter to its preceding sentence body.
    rebuilt = [body + mark for body, mark in zip(parts[0::2], parts[1::2])]

    sentences = [s.strip() for s in rebuilt if s.strip()]
    return sentences, len(sentences)


# --- 3. Model-inference workers (run independently in each subprocess) ---

# Per-process model cache: populated once by init_worker() so each worker
# subprocess loads the tokenizer and models a single time.
worker_models = {}


def init_worker():
    """Initializer run once in each worker process: load tokenizer and models.

    Populates the module-level `worker_models` cache with the shared
    tokenizer, the 3-class filter model, the binary classifier, and the
    torch device, so `predict_batch` can reuse them for every file.
    """
    global worker_models

    print(f"进程 {os.getpid()} 正在加载模型...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Shared tokenizer for both classifiers.
    worker_models['tokenizer'] = AutoTokenizer.from_pretrained(CONFIG["base_model_name"])

    # (cache key, PEFT adapter path, number of labels) per classifier.
    model_specs = [
        ('filter_model', CONFIG["filter_model_path"], 3),
        ('binary_model', CONFIG["binary_model_path"], 2),
    ]
    for cache_key, adapter_path, n_labels in model_specs:
        base = AutoModelForSequenceClassification.from_pretrained(
            CONFIG["base_model_name"], num_labels=n_labels
        )
        model = PeftModel.from_pretrained(base, adapter_path).to(device)
        model.eval()
        worker_models[cache_key] = model

    worker_models['device'] = device
    print(f"进程 {os.getpid()} 模型加载完毕。")


def predict_batch(sentences, model_key, return_probs=False):
    """Run batched inference with one of the cached worker models.

    Args:
        sentences: list of sentence strings to classify.
        model_key: key into `worker_models` ('filter_model' or 'binary_model').
        return_probs: when True, also collect per-class softmax probabilities.

    Returns:
        (predictions, probabilities): predictions is a list of argmax class
        ids; probabilities maps 'class_<i>' -> list of floats (empty dict
        when return_probs is False).
    """
    model = worker_models[model_key]
    tokenizer = worker_models['tokenizer']
    device = worker_models['device']
    batch_size = CONFIG["batch_size"]

    predictions = []
    probabilities = {}

    with torch.no_grad():
        for start in range(0, len(sentences), batch_size):
            chunk = sentences[start:start + batch_size]
            encoded = tokenizer(
                chunk, padding=True, truncation=True, max_length=512,
                return_tensors="pt"
            ).to(device)
            logits = model(**encoded).logits
            predictions.extend(torch.argmax(logits, dim=1).cpu().tolist())

            if return_probs:
                probs = torch.nn.functional.softmax(logits, dim=-1)
                # Append this batch's probabilities per class; setdefault
                # creates each list on the first batch.
                for cls in range(probs.shape[1]):
                    probabilities.setdefault(f'class_{cls}', []).extend(
                        probs[:, cls].cpu().tolist()
                    )

    return predictions, probabilities


def _prob_stats(values):
    """Return (min, max, mean) of a probability list; zeros when empty."""
    if not values:
        return 0, 0, 0
    return min(values), max(values), np.mean(values)


def _store_sentence_records(records):
    """Best-effort insert of per-sentence predictions into the SQLite DB.

    Failures are logged, not raised, so one bad write never aborts a file.
    The connection is always closed, even when executemany fails (the
    original leaked it on that path).
    """
    conn = None
    try:
        conn = sqlite3.connect(CONFIG['db_path'])
        conn.executemany(
            "INSERT OR IGNORE INTO sentence_predictions (report_id, company_name, year, sentence_text, predicted_class, prediction_probability) VALUES (?, ?, ?, ?, ?, ?)",
            records
        )
        conn.commit()
    except Exception as e:
        print(f"进程 {os.getpid()} 数据库写入失败: {e}")
    finally:
        if conn is not None:
            conn.close()


def _build_summary(root_folder, report_id, company, year, total_count,
                   exposure_stats=(0, 0, 0), exposure_count=0,
                   prevention_stats=(0, 0, 0), prevention_count=0):
    """Assemble one summary row; defaults produce the all-zero 'empty' row."""
    return {
        "root_folder": root_folder, "id": report_id, "公司名称": company, "年份": year,
        "气候风险暴露预测概率最小值": exposure_stats[0],
        "气候风险暴露预测概率最大值": exposure_stats[1],
        "气候风险暴露预测概率平均值": exposure_stats[2],
        "气候风险暴露的句子总数": exposure_count,
        "气候风险防范预测概率最小值": prevention_stats[0],
        "气候风险防范预测概率最大值": prevention_stats[1],
        "气候风险防范预测概率平均值": prevention_stats[2],
        "气候风险防范的句子总数": prevention_count,
        "总句数": total_count
    }


def process_single_file(filepath):
    """Full pipeline for one report file.

    Parses metadata from the filename, splits the text into sentences,
    filters climate-relevant sentences with the 3-class model, classifies
    them as exposure (class 0) vs. prevention (class 1) with the binary
    model, stores per-sentence predictions in SQLite, and returns one
    summary row of statistics.

    Returns:
        dict summary row (see _build_summary), or {"error": ...} when the
        file cannot be read.
    """
    report_id, company, year = parse_filename(filepath)
    try:
        # e.g. .../csr_reports/2020/file.txt -> "csr_reports"
        root_folder = os.path.basename(os.path.dirname(os.path.dirname(filepath)))
    except Exception:  # was a bare except; keep the best-effort fallback
        root_folder = "unknown_source"

    try:
        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
            content = f.read()
    except Exception as e:
        return {"error": f"读取文件失败: {e}"}

    all_sentences, total_sentences_count = clean_and_split_text(content)
    if not all_sentences:
        return _build_summary(root_folder, report_id, company, year, total_sentences_count)

    # --- Stage 1: 3-class filter keeps climate-related sentences
    # (classes 0 and 1; the remaining class is irrelevant text). ---
    filter_preds, _ = predict_batch(all_sentences, 'filter_model')
    relevant_sentences = [s for s, p in zip(all_sentences, filter_preds) if p in (0, 1)]
    if not relevant_sentences:
        return _build_summary(root_folder, report_id, company, year, total_sentences_count)

    # --- Stage 2: binary exposure (0) / prevention (1) classification. ---
    binary_preds, probs_dict = predict_batch(relevant_sentences, 'binary_model', return_probs=True)

    # .get with a default guards against a missing class key.
    exposure_probs = probs_dict.get('class_0', [])
    prevention_probs = probs_dict.get('class_1', [])

    # Persist per-sentence results (stores the class-0 probability, as before).
    _store_sentence_records([
        (report_id, company, year, sent, pred, prob)
        for sent, pred, prob in zip(relevant_sentences, binary_preds, exposure_probs)
    ])

    # Per-class statistics use each sentence's probability for its own class.
    class_0_probs = [p for pred, p in zip(binary_preds, exposure_probs) if pred == 0]
    class_1_probs = [p for pred, p in zip(binary_preds, prevention_probs) if pred == 1]

    return _build_summary(
        root_folder, report_id, company, year, total_sentences_count,
        exposure_stats=_prob_stats(class_0_probs),
        exposure_count=binary_preds.count(0),
        prevention_stats=_prob_stats(class_1_probs),
        prevention_count=binary_preds.count(1),
    )


# --- 4. Main entry point and parallel dispatch ---
if __name__ == "__main__":
    start_time = time.time()

    all_files = []
    for folder in CONFIG['root_folders']:
        if not os.path.isdir(folder):
            print(f"警告：找不到文件夹 {folder}，已跳过。")
            continue
        for year_folder in os.listdir(folder):
            try:
                if int(year_folder) < 2006:
                    continue
            except ValueError:
                print(f"警告：文件夹名称 '{year_folder}' 不是有效年份，已跳过。")
                continue

            year_path = os.path.join(folder, year_folder)
            if os.path.isdir(year_path):
                for filename in os.listdir(year_path):
                    if filename.endswith(".txt"):
                        all_files.append(os.path.join(year_path, filename))

    print(f"共找到 {len(all_files)} 个 .txt 文件待处理。")
    if not all_files:
        exit()

    num_processes = 2
    print(f"启动 {num_processes} 个并行进程...")

    with Pool(processes=num_processes, initializer=init_worker) as pool:
        results = list(tqdm(pool.imap(process_single_file, all_files), total=len(all_files)))

    print("--- 所有文件处理完成 ---")

    final_data = {os.path.basename(folder): [] for folder in CONFIG['root_folders']}

    for res in results:
        if res and "error" not in res and res.get('root_folder') in final_data:
            final_data[res['root_folder']].append(res)

    # (已修改) 定义新的列顺序
    cols_order = [
        "id", "公司名称", "年份",
        "气候风险暴露预测概率最小值", "气候风险暴露预测概率最大值", "气候风险暴露预测概率平均值",
        "气候风险暴露的句子总数",
        "气候风险防范预测概率最小值", "气候风险防范预测概率最大值", "气候风险防范预测概率平均值",
        "气候风险防范的句子总数",
        "总句数"
    ]

    for folder_name, data_list in final_data.items():
        if data_list:
            output_filename = f"{folder_name}_summary.xlsx"
            print(f"正在保存结果到 {output_filename} ...")
            df = pd.DataFrame(data_list)
            df_to_save = df[[col for col in cols_order if col in df.columns]]
            df_to_save.to_excel(output_filename, index=False, engine='openpyxl')

    end_time = time.time()
    print(f"\n全部任务完成！总耗时: {end_time - start_time:.2f} 秒。")

