# -*- encoding:utf-8 -*-
"""
TextCNN 文本分类实现（支持词/字级别输入和预训练词向量）
"""
from collections import Counter
import os
import sys
import torch
import json
import random
import argparse
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import *
from uer.model_builder import build_model
from uer.utils.optimizers import BertAdam
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
import numpy as np
from tqdm import tqdm
import time
import jieba
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import datetime
import torch.nn.functional as F
import pandas as pd
from io import StringIO
import re
from gensim.models import KeyedVectors
from typing import Union, Tuple

class OutputCollector:
    """Tee stdout to the real terminal while keeping a filtered in-memory copy."""

    def __init__(self):
        self.terminal = sys.stdout
        self.buffer = StringIO()
        # Strict tqdm progress-bar format, e.g. " 42%|####      | 42/100"
        self.tqdm_pattern = re.compile(r'^\s*\d+%\|.*\|\s*\d+/\d+')
        self._is_tqdm = False  # True while a progress bar is being rendered

    def write(self, message):
        # Always echo to the real terminal.
        self.terminal.write(message)

        if self._is_tqdm:
            # A newline marks the end of the progress bar; either way the
            # in-flight bar updates are not recorded in the buffer.
            self._is_tqdm = '\n' not in message
            return

        self.buffer.write(message)

    def flush(self):
        self.terminal.flush()

    def get_filtered_output(self):
        """Return the buffered output with tqdm-style progress lines removed."""
        kept = []
        for line in self.buffer.getvalue().split('\n'):
            if self.tqdm_pattern.match(line):
                continue
            kept.append(line)
        return '\n'.join(kept)

    def set_tqdm(self, is_tqdm=True):
        """Toggle progress-bar mode (see write())."""
        self._is_tqdm = is_tqdm

class TextCNNClassifier(nn.Module):
    """TextCNN classifier: embedding -> parallel 1-D convs -> max-pool -> linear."""

    def __init__(self, args, vocab_size, pretrained_embeddings=None):
        super(TextCNNClassifier, self).__init__()
        self.args = args

        # Token embedding layer; the PAD row is kept at the zero vector.
        self.embedding = nn.Embedding(vocab_size, args.embedding_dim, padding_idx=PAD_ID)
        if pretrained_embeddings is not None:
            self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embeddings))
            if not args.finetune_emb:
                # Freeze the embeddings unless fine-tuning was requested.
                self.embedding.weight.requires_grad = False

        # One Conv1d per kernel size, each producing num_filters channels.
        self.convs = nn.ModuleList(
            nn.Conv1d(args.embedding_dim, args.num_filters, size, padding=size // 2)
            for size in args.kernel_sizes
        )

        # Classification head over the concatenated pooled features.
        self.dropout = nn.Dropout(args.dropout)
        self.fc = nn.Linear(len(args.kernel_sizes) * args.num_filters, args.labels_num)
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, input_ids, labels=None):
        """Return (loss, logits) when labels are given, otherwise just logits.

        input_ids: LongTensor of shape [batch, seq_len].
        """
        embedded = self.embedding(input_ids).permute(0, 2, 1)  # [batch, emb_dim, seq_len]

        pooled = []
        for conv in self.convs:
            feature_map = torch.relu(conv(embedded))
            # Global max-pool over the time dimension -> [batch, num_filters]
            pooled.append(F.max_pool1d(feature_map, feature_map.size(2)).squeeze(2))

        features = self.dropout(torch.cat(pooled, 1))
        logits = self.fc(features)

        if labels is None:
            return logits
        return self.criterion(logits, labels.view(-1)), logits

def load_word_vectors_with_cache(word2vec_path, vocab, cache_dir=".."):
    """Load pretrained word vectors, caching them in KeyedVectors binary format.

    Args:
        word2vec_path: path to the raw text-format word2vec file.
        vocab: vocabulary object (words accessible by index via ``i2w``).
        cache_dir: directory where the ``<name>.kv`` cache file is stored.

    Returns:
        numpy array of shape [len(vocab), vector_size]; rows for words missing
        from the pretrained vectors keep a small random initialization.
    """
    # The cache name is derived from the original file name.
    cache_file = os.path.join(cache_dir, f"{os.path.basename(word2vec_path)}.kv")

    print("加载词向量...")
    if os.path.exists(cache_file):
        # Fast path: memory-map the previously saved KeyedVectors cache.
        print(f"Loading cached word vectors from {cache_file}...")
        start_time = time.time()
        word2vec = KeyedVectors.load(cache_file, mmap='r')
        print(f"Loaded cached vectors in {time.time() - start_time:.2f}s")
    else:
        # Slow path: parse the text format once, then cache for next time.
        print(f"Loading and caching word vectors from {word2vec_path}...")
        start_time = time.time()
        word2vec = KeyedVectors.load_word2vec_format(
            word2vec_path,
            binary=False,
            unicode_errors='ignore'
        )
        word2vec.save(cache_file)
        print(f"Loaded and cached vectors in {time.time() - start_time:.2f}s")

    # Random init so out-of-vocabulary rows are small but non-zero.
    emb_matrix = np.random.randn(len(vocab), word2vec.vector_size) * 0.01
    found = 0

    for idx in range(len(vocab)):
        word = vocab.i2w[idx]  # look the word up by its index
        # Try the exact form first, then the lowercase fallback.
        for candidate in (word, word.lower()):
            if candidate in word2vec:
                emb_matrix[idx] = word2vec[candidate]
                found += 1
                break

    print(f"覆盖词汇: {found}/{len(vocab)} ({found / len(vocab):.1%})")
    return emb_matrix

def tokenize_text(text, tokenizer_type="word", max_length=None):
    """Tokenize *text*, optionally pre-truncating the raw string first.

    Args:
        text: raw input string.
        tokenizer_type: "word" (jieba segmentation), "char" (per character),
            anything else falls back to whitespace splitting.
        max_length: if given, the raw text is cut to this many characters
            before tokenization.
    """
    if max_length is not None:
        text = text[:max_length]  # slicing a shorter string is a no-op

    if tokenizer_type == "char":
        return list(text)
    if tokenizer_type == "word":
        return list(jieba.cut(text))
    return text.split()  # fallback: whitespace tokenization

def batch_loader(batch_size, dataset):
    """Yield (input_ids, labels) LongTensor pairs in chunks of *batch_size*.

    The final batch may be smaller when len(dataset) is not a multiple of
    batch_size. Each dataset item is a (token_ids, label) pair.
    """
    start = 0
    while start < len(dataset):
        chunk = dataset[start:start + batch_size]
        yield (
            torch.LongTensor([sample[0] for sample in chunk]),
            torch.LongTensor([sample[1] for sample in chunk]),
        )
        start += batch_size

def build_dataset(args, file_path, vocab, tag):
    """Build a padded dataset of (token_ids, label) pairs from a TSV file.

    The first line of the file is treated as a header and skipped. Texts are
    pre-truncated before tokenization, then padded/truncated to exactly
    args.seq_length token ids.

    Args:
        args: namespace with ``seq_length`` and ``tokenizer`` attributes.
        file_path: path to a TSV file with "label<TAB>text" rows.
        vocab: vocabulary object mapping tokens to ids via ``get()``.
        tag: dataset name used in the printed label-distribution summary.

    Returns:
        list of (token_ids, label) tuples.
    """
    dataset = []
    label_counter = Counter()

    # Pre-truncation length leaves headroom for word-level tokenization
    # (Chinese words average roughly 3-4 characters each).
    pre_truncate_length = args.seq_length * (
        4 if args.tokenizer == "word" else 1
    )

    # Fix: sys.stdout is only an OutputCollector when main() installed one.
    # Calling set_tqdm unconditionally crashed with AttributeError whenever
    # this function was used with a plain stdout stream.
    set_tqdm = getattr(sys.stdout, "set_tqdm", None)

    with open(file_path, 'r', encoding='utf-8') as f:
        if set_tqdm is not None:
            set_tqdm(True)
        # Skip the header row; the default avoids StopIteration on an
        # empty file.
        next(f, None)

        for line in tqdm(f, desc=f"Loading {file_path}", file=sys.stdout):
            parts = line.strip().split('\t')
            if len(parts) < 2:
                continue

            try:
                label = int(parts[0])
                text = parts[1]
            except (IndexError, ValueError):
                print(f"数据格式错误，跳过该行: {line.strip()}")
                continue

            # Truncate the raw text first, then tokenize.
            tokens = tokenize_text(text, args.tokenizer, pre_truncate_length)
            token_ids = [vocab.get(token) for token in tokens]

            # Pad or truncate to the fixed model sequence length.
            if len(token_ids) < args.seq_length:
                token_ids += [PAD_ID] * (args.seq_length - len(token_ids))
            else:
                token_ids = token_ids[:args.seq_length]

            dataset.append((token_ids, label))
            label_counter.update([label])

        if set_tqdm is not None:
            set_tqdm(False)

    print(f"{tag} Label distribution: {label_counter}")
    return dataset

def main():
    """Train a TextCNN classifier with early stopping and write a training log.

    Command-line arguments control paths, model hyperparameters and training
    settings; dev and test accuracy are reported after every epoch. All stdout
    is captured and a filtered copy is appended to ./train_log/ on exit.
    """
    # Capture stdout so a filtered log can be written out in the finally block.
    output_collector = OutputCollector()
    sys.stdout = output_collector

    # Fix: defined before the try so the finally block can tell whether
    # argument parsing succeeded. Previously a parse failure (e.g. missing
    # required flags) raised a NameError on `args` that masked the real error.
    args = None

    # Startup timestamp banner
    print("\n" + "=" * 60)
    print(f"【训练启动】{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 60 + "\n")

    try:
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

        # Path options
        parser.add_argument("--output_model_path", default="./models/textcnn_model.bin",
                            type=str, help="模型保存路径")
        parser.add_argument("--vocab_path", required=True, type=str, help="词表路径")
        parser.add_argument("--data_dir", type=str, required=True, help="数据集目录")
        parser.add_argument("--word2vec_path", type=str, default=None, help="预训练词向量路径")

        # Model options
        parser.add_argument("--batch_size", type=int, default=32, help="批大小")
        parser.add_argument("--seq_length", type=int, default=256, help="序列长度")
        parser.add_argument("--embedding_dim", type=int, default=300)
        parser.add_argument("--kernel_sizes", type=int, nargs='+', default=[3, 4, 5])
        parser.add_argument("--num_filters", type=int, default=100)
        parser.add_argument("--dropout", type=float, default=0.5)
        parser.add_argument("--finetune_emb", action="store_true", help="是否微调词向量")
        parser.add_argument("--labels_num", type=int, required=False, help="分类标签数")

        # Training options
        parser.add_argument("--learning_rate", type=float, default=1e-3)
        parser.add_argument("--epochs_num", type=int, default=10)
        parser.add_argument("--report_steps", type=int, default=100)
        parser.add_argument("--seed", type=int, default=42)
        parser.add_argument("--tokenizer", choices=["word", "char", "space"],
                            default="word", help="分词方式")

        args = parser.parse_args()
        torch.manual_seed(args.seed)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Load the vocabulary
        vocab = Vocab()
        vocab.load(args.vocab_path)
        args.vocab = vocab

        # Load pretrained word vectors when a valid path is given
        pretrained_emb = None
        if args.word2vec_path and os.path.exists(args.word2vec_path):
            pretrained_emb = load_word_vectors_with_cache(args.word2vec_path, args.vocab)

        # Dataset file paths
        args.train_path = os.path.join(args.data_dir, "train.tsv")
        args.dev_path = os.path.join(args.data_dir, "dev.tsv")
        args.test_path = os.path.join(args.data_dir, "test.tsv")

        # Count the number of distinct labels in the training set.
        labels_set = set()
        columns = {}
        with open(args.train_path, mode="r", encoding="utf-8") as f:
            for line_id, line in enumerate(f):
                try:
                    line = line.strip().split("\t")
                    if line_id == 0:
                        # Header row: remember each column's position.
                        for i, column_name in enumerate(line):
                            columns[column_name] = i
                        continue
                    label = int(line[columns["label"]])
                    labels_set.add(label)
                except (KeyError, IndexError, ValueError):
                    # Fix: catch only the expected malformed-row errors instead
                    # of a bare except that hid every bug (incl. KeyboardInterrupt).
                    continue
        args.labels_num = len(labels_set)

        train_set = build_dataset(args, args.train_path, vocab, "train")
        dev_set = build_dataset(args, args.dev_path, vocab, "dev")
        test_set = build_dataset(args, args.test_path, vocab, "test")

        def evaluate(model, dataset, device, args):
            """Evaluate the model on *dataset*; print a report, return accuracy."""
            model.eval()
            correct = 0
            predictions = []
            true_labels = []

            with torch.no_grad():
                progress_bar = tqdm(
                    batch_loader(args.batch_size, dataset),
                    total=len(dataset) // args.batch_size + (1 if len(dataset) % args.batch_size != 0 else 0),
                    desc='Evaluating',
                    unit='batch'
                )

                for input_ids, labels in progress_bar:
                    input_ids, labels = input_ids.to(device), labels.to(device)

                    # TextCNN needs only input_ids at inference time.
                    logits = model(input_ids)
                    preds = torch.argmax(logits, dim=1)

                    predictions.extend(preds.cpu().numpy())
                    true_labels.extend(labels.cpu().numpy())
                    correct += (preds == labels).sum().item()

                    progress_bar.set_postfix(acc=f'{correct / (len(predictions)):.2%}')

            acc = correct / len(dataset)
            report = classification_report(
                true_labels,
                predictions,
                target_names=[f'Class {i}' for i in range(args.labels_num)]
            )
            cm = confusion_matrix(true_labels, predictions)

            print("\nClassification Report:")
            print(report)
            print("\nConfusion Matrix:")
            print(cm)
            print(f"\nAccuracy: {acc:.4f} ({correct}/{len(dataset)})")

            return acc

        # Build the model and optimizer
        model = TextCNNClassifier(args, len(vocab), pretrained_emb).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

        # Summary of the training configuration
        print("训练参数概要：")
        print(f"- 数据集路径: {args.data_dir}")
        print(f"- 词向量路径: {args.word2vec_path}")
        print(f"- 词表路径: {args.vocab_path}")
        print(f"- 学习率: {args.learning_rate}")
        print(f"- 批次大小: {args.batch_size}")
        print(f"- 序列长度: {args.seq_length}")
        print(f"- 预期训练轮数: {args.epochs_num}")
        print("-" * 50 + "\n")

        # Training loop with early stopping: run at least epochs_num epochs,
        # stop once test accuracy plateaus, never exceed 3 * epochs_num.
        args.epoch = 0
        prev_test_acc = 0.0
        stop_training = False

        while not stop_training:
            args.epoch += 1
            print(f"\n--------------Epoch:{args.epoch}--------------")
            model.train()
            total_loss = 0

            progress_bar = tqdm(
                batch_loader(args.batch_size, train_set),
                total=len(train_set) // args.batch_size + (1 if len(train_set) % args.batch_size != 0 else 0),
                desc=f'Epoch {args.epoch}',
                unit='batch'
            )

            for batch_idx, (input_ids, labels) in enumerate(progress_bar):
                input_ids, labels = input_ids.to(device), labels.to(device)

                optimizer.zero_grad()
                loss, _ = model(input_ids, labels)  # labels are needed for the loss
                loss.backward()
                optimizer.step()

                total_loss += loss.item()
                progress_bar.set_postfix(loss=f'{loss.item():.4f}', avg_loss=f'{total_loss / (batch_idx + 1):.4f}')

            # Dev-set evaluation
            print("\nStart evaluation on dev dataset.")
            val_acc = evaluate(model, dev_set, device, args)

            # Test-set evaluation
            print("\nStart evaluation on test dataset.")
            test_acc = evaluate(model, test_set, device, args)

            # Early stopping once the minimum number of epochs has been run.
            if args.epoch >= args.epochs_num:
                if test_acc - prev_test_acc < 0.003:
                    print(f"Early stopping at epoch {args.epoch} | Δacc={abs(test_acc - prev_test_acc):.4f}")
                    stop_training = True
            # Hard cap on the total number of epochs.
            if args.epoch >= 3 * args.epochs_num:
                stop_training = True

            prev_test_acc = test_acc

    finally:
        # Always restore the real stdout first.
        sys.stdout = output_collector.terminal

        # Nothing to log when argument parsing never completed.
        if args is not None:
            filtered_output = output_collector.get_filtered_output()
            dataset_name = os.path.basename(os.path.normpath(args.data_dir))
            current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            # Fix: ensure the log directory exists; previously the open()
            # below crashed with FileNotFoundError at the end of training.
            os.makedirs("./train_log", exist_ok=True)
            # args.epoch is unset if training never started; default to 0.
            epoch = getattr(args, "epoch", 0)
            result_str = f"./train_log/{dataset_name}_cnn_{current_time}_epoch{epoch}.txt"
            with open(result_str, 'a', encoding='utf-8') as f:
                f.write("\n" + "=" * 60 + "\n")
                f.write(f"【训练日志】{time.strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(filtered_output)
                f.write("\n" + "=" * 60 + "\n\n")

if __name__ == "__main__":
    main()