# -*- encoding:utf-8 -*-
"""
KNN 文本分类实现（基于原有框架修改）
"""
from collections import Counter
import os
import sys
import torch
import argparse
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from tqdm import tqdm
import time
import jieba
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import datetime
from io import StringIO
import re
from gensim.models import KeyedVectors
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from joblib import parallel_backend
import faiss
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted

# Loading embeddings is faster on CPU; later Faiss computation still runs
# on the GPU, so the torch side stays on CPU here.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
print(f"Using device: {device}")

class OutputCollector:
    """Tee stdout to the real terminal while keeping a filtered buffer copy.

    While a tqdm progress bar is being drawn (flagged via set_tqdm), its
    updates are echoed to the terminal but excluded from the buffer, so the
    saved log stays free of carriage-return spam.
    """

    def __init__(self):
        self.terminal = sys.stdout
        self.buffer = StringIO()
        # Strict tqdm line shape: "<pct>%|<bar>| <done>/<total>"
        self.tqdm_pattern = re.compile(r'^\s*\d+%\|.*\|\s*\d+/\d+')
        self._is_tqdm = False  # True while a progress bar is active

    def write(self, message):
        # Always echo to the real terminal.
        self.terminal.write(message)

        if self._is_tqdm:
            # A newline ends the progress bar; in either case the message
            # is dropped from the buffer.
            self._is_tqdm = '\n' not in message
            return

        self.buffer.write(message)

    def flush(self):
        self.terminal.flush()

    def get_filtered_output(self):
        """Return the buffered text with any residual tqdm-shaped lines removed."""
        kept = [
            line
            for line in self.buffer.getvalue().split('\n')
            if not self.tqdm_pattern.match(line)
        ]
        return '\n'.join(kept)

    def set_tqdm(self, is_tqdm=True):
        """Mark the start (True) or end (False) of a tqdm progress-bar section."""
        self._is_tqdm = is_tqdm


class FaissKNN(BaseEstimator, ClassifierMixin):
    """Faiss-backed KNN classifier compatible with sklearn grid search.

    Standardizes the training vectors, builds a GPU Faiss index over them,
    and predicts by (optionally distance-weighted) majority vote among the
    n_neighbors nearest training samples.
    """

    def __init__(self, n_neighbors=5, weights='uniform', metric='l2'):
        self.n_neighbors = n_neighbors
        self.weights = weights
        self.metric = metric
        self.classes_ = None
        self._faiss_index = None
        self._y_train = None  # label *indices* into self.classes_ (see fit)
        self.scaler = StandardScaler()

    def _build_index(self, X):
        """Build a GPU Faiss index; cosine uses inner product on L2-normalized rows."""
        X = X.astype('float32')
        res = faiss.StandardGpuResources()  # acquire GPU resources
        if self.metric == 'cosine':
            index = faiss.IndexFlatIP(X.shape[1])
            faiss.normalize_L2(X)
        else:
            index = faiss.IndexFlatL2(X.shape[1])
        self._faiss_index = faiss.index_cpu_to_gpu(res, 0, index)  # move to GPU
        self._faiss_index.add(X)

    def fit(self, X, y):
        """Validate inputs, standardize X, build the index, and encode labels."""
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        self.classes_ = np.unique(y)

        X_scaled = self.scaler.fit_transform(X).astype('float32')
        self._build_index(X_scaled)

        # Fix: store labels as indices into classes_ so the voting below is
        # correct for arbitrary (non-contiguous / negative) label values,
        # not just labels that happen to be 0..k-1. classes_ is sorted by
        # np.unique, so searchsorted is an exact encoder.
        self._y_train = np.searchsorted(self.classes_, y)
        return self

    def predict(self, X):
        """Predict labels for X via nearest-neighbor voting."""
        check_is_fitted(self)
        X = check_array(X)

        X_scaled = self.scaler.transform(X).astype('float32')

        # Cosine similarity: normalize queries to match the IP index.
        if self.metric == 'cosine':
            faiss.normalize_L2(X_scaled)

        distances, indices = self._faiss_index.search(X_scaled, self.n_neighbors)

        n_classes = len(self.classes_)
        if self.weights == 'uniform':
            neighbor_idx = self._y_train[indices]
            # minlength guards bincount when a class is absent among neighbors
            votes = np.array([
                np.bincount(row, minlength=n_classes).argmax()
                for row in neighbor_idx
            ])
        else:  # distance weighting
            # NOTE(review): for metric='cosine' Faiss returns inner products
            # (higher = closer), so 1/(d+eps) inverts the intended weighting
            # there — preserved from the original; confirm before relying on
            # cosine + distance weights together.
            inv_dist = 1.0 / (distances + 1e-6)  # guard against division by zero
            weighted_votes = np.zeros((len(X), n_classes))
            for i in range(len(X)):
                for j in range(self.n_neighbors):
                    weighted_votes[i, self._y_train[indices[i, j]]] += inv_dist[i, j]
            votes = np.argmax(weighted_votes, axis=1)

        # Decode class indices back to the original label values.
        return self.classes_[votes]


class EnhancedKNNClassifier:
    """KNN wrapper that runs a Faiss-backed grid search and reports metrics."""

    def __init__(self, args):
        self.args = args
        self.model = None
        self.best_params_ = None

    def grid_search(self, X_train, y_train):
        """Grid-search n_neighbors / weights / metric with 3-fold CV (macro F1)."""
        # Candidate lists come straight from the CLI arguments.
        search_space = {
            'knn__n_neighbors': self.args.n_neighbors,
            'knn__weights': self.args.weights,
            'knn__metric': self.args.metric,
        }

        pipeline = Pipeline([
            ('scaler', StandardScaler()),
            ('knn', FaissKNN()),
        ])

        grid = GridSearchCV(
            estimator=pipeline,
            param_grid=search_space,
            cv=3,
            scoring='f1_macro',
            verbose=3,
            n_jobs=self.args.n_jobs,
        )

        # Run the search with a multiprocessing joblib backend.
        with parallel_backend('multiprocessing'):
            grid.fit(X_train, y_train)

        # Keep the best fitted pipeline for later prediction.
        self.model = grid.best_estimator_
        self.best_params_ = grid.best_params_
        print(f"Best parameters: {grid.best_params_}")

    def predict(self, X):
        return self.model.predict(X)

    def evaluate(self, X, y, tag='Test'):
        """Print classification report, accuracy and confusion matrix for (X, y)."""
        y_pred = self.predict(X)
        print(f"\n{tag} Classification Report:")
        print(classification_report(y, y_pred))
        print(f"{tag} Accuracy: {accuracy_score(y, y_pred):.4f}")

        labels = sorted(np.unique(y))
        cm_df = pd.DataFrame(confusion_matrix(y, y_pred),
                             index=labels, columns=labels)
        print(f"\n{tag} Confusion Matrix:")
        print(cm_df.to_string())

def build_dataset(args, file_path, vocab, tag):
    """Build (X, y) arrays from a TSV file of "label<TAB>text" rows.

    Each text is tokenized, mapped to vocab ids, padded/truncated to
    args.seq_length, embedded with args.embedding_layer, and mean-pooled
    into a single feature vector for the KNN stage.

    Args:
        args: namespace carrying seq_length, tokenizer and embedding_layer.
        file_path: path to a TSV file whose first line is a header.
        vocab: vocabulary with a get(token) -> id mapping.
        tag: name of the split, used only for logging.

    Returns:
        (np.ndarray, np.ndarray): features (n, emb_dim) and int labels (n,).
    """
    X = []
    y = []
    label_counter = Counter()

    # Pre-truncate raw text: word tokenization shrinks the sequence, so keep
    # roughly 4 chars per target token for the "word" tokenizer.
    pre_truncate_length = args.seq_length * (4 if args.tokenizer == "word" else 1)

    # Fix: only toggle tqdm filtering when stdout actually is the
    # OutputCollector; a plain stdout would otherwise raise AttributeError.
    set_tqdm = getattr(sys.stdout, "set_tqdm", None)

    with open(file_path, 'r', encoding='utf-8') as f:
        if callable(set_tqdm):
            set_tqdm(True)
        next(f, None)  # fix: skip header without raising StopIteration on empty files

        for line in tqdm(f, desc=f"Loading {file_path}", file=sys.stdout):
            parts = line.strip().split('\t')
            if len(parts) < 2:
                continue

            try:
                label = int(parts[0])
                text = parts[1]
            except (IndexError, ValueError):
                print(f"数据格式错误，跳过该行: {line.strip()}")
                continue

            # Tokenize and map to vocab ids (vocab.get handles unknown tokens).
            tokens = tokenize_text(text, args.tokenizer, pre_truncate_length)
            token_ids = [vocab.get(token) for token in tokens]

            # Pad / truncate to a fixed sequence length.
            if len(token_ids) < args.seq_length:
                token_ids += [PAD_ID] * (args.seq_length - len(token_ids))
            else:
                token_ids = token_ids[:args.seq_length]

            # Mean-pool embeddings into one vector per text (inference only).
            with torch.no_grad():
                input_tensor = torch.LongTensor([token_ids]).to(device)
                embeddings = args.embedding_layer(input_tensor)
                mean_emb = embeddings.mean(dim=1).squeeze().cpu().numpy()

            X.append(mean_emb)
            y.append(label)
            label_counter.update([label])

    if callable(set_tqdm):
        set_tqdm(False)
    print(f"{tag} Label distribution: {label_counter}")
    return np.array(X), np.array(y)

def load_word_vectors_with_cache(args, vocab, cache_dir=".."):
    """Load pretrained word vectors, caching them in gensim's fast .kv format.

    The first run parses the raw word2vec text file and saves a .kv cache;
    later runs memory-map the cache instead. Every vocab word is looked up
    under several casings; misses keep a small random initialization.
    """
    base_name = os.path.basename(args.word2vec_path)
    cache_file = os.path.join(cache_dir, f"{base_name}.kv")

    print("加载词向量...")
    if os.path.exists(cache_file):
        print(f"加载缓存文件: {cache_file}")
        word2vec = KeyedVectors.load(cache_file, mmap='r')
    else:
        print(f"从原始文件加载: {args.word2vec_path}")
        try:
            word2vec = KeyedVectors.load_word2vec_format(
                args.word2vec_path, binary=False, unicode_errors='ignore'
            )
            word2vec.save(cache_file)
        except Exception as e:
            raise RuntimeError(f"词向量加载失败: {str(e)}")

    # Sanity check: the configured dimension must match the loaded vectors.
    if args.embedding_dim != word2vec.vector_size:
        raise ValueError(f"维度不匹配: 配置{args.embedding_dim} vs 实际{word2vec.vector_size}")

    # Random init for every row; rows with a pretrained match are overwritten.
    emb_matrix = np.random.normal(
        scale=0.05,
        size=(len(vocab), word2vec.vector_size),
    )
    found = 0

    for idx in range(len(vocab)):
        word = vocab.i2w[idx]

        # Skip pure punctuation / symbol tokens.
        if not word.isalnum():
            continue

        # Try a few casing variants before giving up on the word.
        for variant in (word, word.lower(), word.capitalize(), word.title()):
            if variant in word2vec:
                emb_matrix[idx] = word2vec[variant]
                found += 1
                break

    print(f"覆盖词汇: {found}/{len(vocab)} ({found / len(vocab):.1%})")
    return emb_matrix

def tokenize_text(text, tokenizer_type="word", max_length=None):
    """Tokenize text by the configured strategy, pre-truncating raw text first.

    Args:
        text: raw input string.
        tokenizer_type: "word" (jieba segmentation), "char" (per character);
            any other value falls back to whitespace splitting.
        max_length: optional cap on the raw character count before tokenizing.
    """
    # Truncate the raw string first (no-op when already short enough).
    if max_length is not None:
        text = text[:max_length]

    if tokenizer_type == "char":
        return list(text)
    if tokenizer_type == "word":
        return list(jieba.cut(text))
    return text.split()

def main():
    """Entry point: parse args, build features, grid-search KNN, log results."""
    # Install the output collector first so every print below is captured.
    output_collector = OutputCollector()
    sys.stdout = output_collector

    args = None  # fix: bound up-front so the finally block never NameErrors

    # Startup timestamp banner.
    print("\n" + "=" * 60)
    print(f"【训练启动】{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 60 + "\n")

    try:
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

        # Path arguments
        parser.add_argument("--output_model_path", default=None, type=str)
        parser.add_argument("--vocab_path", required=True, type=str)
        parser.add_argument("--data_dir", type=str, required=True)
        parser.add_argument("--word2vec_path", type=str, default=None)

        # KNN grid-search arguments
        parser.add_argument("--n_neighbors", type=int, default=[3, 5, 9, 15, 21], nargs='+',
                            help="K值候选列表，用于网格搜索")
        parser.add_argument("--weights", choices=['uniform', 'distance'], default=['uniform', 'distance'],
                            nargs='+', help="权重类型候选，用于网格搜索")
        parser.add_argument("--metric", choices=['l2', 'cosine'], default=['l2', 'cosine'],
                            nargs='+', help="距离度量候选，用于网格搜索")
        # fix: type=int — without it a CLI-supplied value reached GridSearchCV as a string
        parser.add_argument("--n_jobs", type=int, default=4, help="计算并行数")

        # General arguments
        parser.add_argument("--seq_length", type=int, default=256)
        parser.add_argument("--embedding_dim", type=int, default=300)
        parser.add_argument("--seed", type=int, default=42)
        parser.add_argument("--tokenizer", choices=["word", "char", "space"], default="word")

        args = parser.parse_args()
        torch.manual_seed(args.seed)

        # Load vocabulary and build the embedding layer.
        vocab = Vocab()
        vocab.load(args.vocab_path)
        args.embedding_layer = nn.Embedding(len(vocab), args.embedding_dim, padding_idx=PAD_ID)

        # Optionally load pretrained word vectors and freeze the embeddings.
        if args.word2vec_path:
            if os.path.exists(args.word2vec_path):
                pretrained_emb = load_word_vectors_with_cache(args, vocab)
                args.embedding_layer.weight.data.copy_(torch.from_numpy(pretrained_emb))
                args.embedding_layer.weight.requires_grad = False
            else:
                print(f"[Warning] 词向量文件不存在，使用随机初始化: {args.word2vec_path}")

        args.embedding_layer.to(device)  # move the embedding layer to the device

        # Build train/dev/test feature matrices.
        print("\nBuilding datasets...")
        X_train, y_train = build_dataset(args, os.path.join(args.data_dir, "train.tsv"), vocab, "train")
        X_dev, y_dev = build_dataset(args, os.path.join(args.data_dir, "dev.tsv"), vocab, "dev")
        X_test, y_test = build_dataset(args, os.path.join(args.data_dir, "test.tsv"), vocab, "test")

        # Summarize the run configuration for the log.
        print("训练参数概要：")
        print(f"- 数据集路径: {args.data_dir}")
        print(f"- 词向量路径: {args.word2vec_path}")
        print(f"- 词表路径: {args.vocab_path}")
        print(f"- 序列长度: {args.seq_length}")
        print(f"- K值候选: {args.n_neighbors}")
        print(f"- 权重候选: {args.weights}")
        print(f"- 距离度量候选: {args.metric}")
        print(f"- 搜索并行数: {args.n_jobs}")
        print("-" * 50 + "\n")

        # Grid search over the KNN hyper-parameters.
        print("\n===== Starting Grid Search =====")
        model = EnhancedKNNClassifier(args)
        model.grid_search(X_train, y_train)

        def evaluate(X, y, tag):
            """Print report, accuracy and confusion matrix for one split."""
            y_pred = model.predict(X)
            print(f"\n{tag} Classification Report:")
            print(classification_report(y, y_pred))
            print(f"{tag} Accuracy: {accuracy_score(y, y_pred):.4f}")

            cm = confusion_matrix(y, y_pred)
            cm_df = pd.DataFrame(cm,
                                 index=sorted(np.unique(y)),
                                 columns=sorted(np.unique(y)))
            print(f"\n{tag} Confusion Matrix:")
            print(cm_df.to_string())  # to_string avoids column wrapping

        # Final evaluation on held-out splits.
        print("\n===== Evaluation =====")
        evaluate(X_dev, y_dev, "Dev")
        evaluate(X_test, y_test, "Test")

    finally:
        # Restore stdout, then persist the filtered log.
        sys.stdout = output_collector.terminal

        filtered_output = output_collector.get_filtered_output()
        # fix: args is None when argument parsing failed — don't mask that error
        dataset_name = (os.path.basename(os.path.normpath(args.data_dir))
                        if args is not None else "unknown")
        current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        os.makedirs("./train_log", exist_ok=True)  # fix: ensure the log directory exists
        log_path = f"./train_log/{dataset_name}_faiss_{current_time}.txt"
        with open(log_path, 'a', encoding='utf-8') as f:
            f.write("\n" + "=" * 60 + "\n")
            f.write(f"【训练日志】{time.strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(filtered_output)
            f.write("\n" + "=" * 60 + "\n\n")

if __name__ == "__main__":
    main()




