# -*- encoding:utf-8 -*-
import os
import sys
import torch
import json
import random
import argparse
import collections
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import *
from uer.model_builder import build_model
from uer.utils.optimizers import  BertAdam
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
from brain import KnowledgeGraph
from multiprocessing import Process, Pool
import numpy as np
import json
import numpy as np
from tqdm import tqdm
import time
import jieba

# Module-level default device: first CUDA GPU if available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class BertClassifier(nn.Module):
    """BERT-style sequence classifier with an optional "visible matrix" (VM).

    Wraps the embedding and encoder of a prebuilt UER model and adds a
    two-layer classification head. The VM carries float sentiment scores
    that the encoder can use to modulate attention; it is disabled when
    either ``--no_vm`` or ``--no_kg`` is set.
    """

    def __init__(self, args, model):
        super(BertClassifier, self).__init__()
        self.embedding = model.embedding
        self.encoder = model.encoder
        self.labels_num = args.labels_num
        self.pooling = args.pooling
        self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
        self.output_layer_2 = nn.Linear(args.hidden_size, args.labels_num)
        self.softmax = nn.LogSoftmax(dim=-1)
        self.criterion = nn.NLLLoss()
        # If either flag is set, the visible matrix is ignored entirely.
        self.use_vm = not (args.no_vm or args.no_kg)
        print("[BertClassifier] use visible_matrix: {}".format(self.use_vm))

    def forward(self, src, label, mask, pos=None, vm=None):
        """Compute classification loss and logits.

        Args:
            src: [batch_size x seq_length] token ids.
            label: [batch_size] gold label ids.
            mask: [batch_size x seq_length] attention mask (0 = padding).
            pos: [batch_size x seq_length] position ids, optional.
            vm: [batch_size x seq_length x seq_length] float sentiment-score
                matrix, optional.

        Returns:
            (loss, logits) where loss is the NLL loss over the batch and
            logits is [batch_size x labels_num] (pre-softmax).
        """
        assert src.shape == mask.shape, f"src和mask尺寸不一致: {src.shape} vs {mask.shape}"
        if vm is not None:
            assert vm.shape[-1] == vm.shape[-2] == src.shape[-1], f"vm尺寸与输入不匹配: {vm.shape} vs {src.shape}"

        # BUG FIX: the visible matrix must be prepared BEFORE the encoder
        # consumes it. Previously this block ran after self.encoder(...),
        # so --no_vm/--no_kg had no effect and the float cast was wasted.
        if not self.use_vm:
            vm = None
        elif vm is not None:
            vm = vm.float()  # encoder expects a float matrix

        # Embedding.
        emb = self.embedding(src, mask, pos)
        # Encoder (attention optionally modulated by the visible matrix).
        output = self.encoder(emb, mask, vm)

        # Pool the encoded sequence into one vector per example.
        if self.pooling == "mean":
            output = torch.mean(output, dim=1)
        elif self.pooling == "max":
            output = torch.max(output, dim=1)[0]
        elif self.pooling == "last":
            output = output[:, -1, :]
        else:
            # Default ("first"): use the [CLS] position.
            output = output[:, 0, :]

        output = torch.tanh(self.output_layer_1(output))
        logits = self.output_layer_2(output)
        loss = self.criterion(self.softmax(logits.view(-1, self.labels_num)), label.view(-1))
        return loss, logits


def add_knowledge_worker(params):
    """Convert raw TSV lines into (tokens, label, mask, pos, vm) examples.

    Args:
        params: tuple ``(p_id, sentences, columns, kg, vocab, args)`` where
            p_id: worker id, used only in progress logging.
            sentences: list of raw tab-separated lines (header excluded).
            columns: mapping of column name -> column index.
            kg: knowledge-graph object, or None when the KG is disabled.
            vocab: vocabulary mapping tokens to ids.
            args: runtime configuration (seq_length, tokenizer, no_kg, ...).

    Returns:
        List of tuples ``(token_ids, label, mask, pos, vm)``; malformed
        lines are logged and skipped.
    """
    p_id, sentences, columns, kg, vocab, args = params

    dataset = []
    for line_id, line in enumerate(sentences):
        if line_id % 10000 == 0:
            print("Progress of process {}: {}/{}".format(p_id, line_id, len(sentences)))

        line = line.strip().split('\t')
        try:
            if len(line) >= 2:  # only rows with at least label + text
                label = int(line[columns["label"]])
                # Prepend [CLS] to the text_a column of the TSV row.
                text = CLS_TOKEN + line[columns["text_a"]]

                if args.tokenizer == "word":
                    # jieba word-level segmentation (mix of words and chars).
                    words = jieba.lcut(text)

                    # Truncate to seq_length-1, append [SEP], pad with [PAD].
                    words = words[:args.seq_length - 1] + [SEP_TOKEN]
                    words += [PAD_TOKEN] * (args.seq_length - len(words))

                    # Map segmented words to token ids.
                    tokens = [vocab.get(w) for w in words]
                    pos = list(range(len(words)))

                    # Build the visible matrix; works with or without a KG.
                    vm = np.zeros((args.seq_length, args.seq_length), dtype=np.float32)
                    if not args.no_kg and kg and hasattr(kg, 'kg_dict'):
                        # KG available: spread each known word's sentiment
                        # score over a +/-1 token window around it.
                        for i in range(min(len(words), args.seq_length)):
                            if words[i] in kg.kg_dict:
                                score = kg.kg_dict[words[i]].get("sentiment_score", 0) * 1000
                                for j in range(max(0, i - 1), min(args.seq_length, i + 2)):
                                    vm[i, j] = score
                    else:
                        # No KG: optionally inject one random attention
                        # hotspot (30% of rows).
                        if random.random() < 0.3:
                            i = random.randint(0, args.seq_length - 1)
                            score = random.uniform(-500, 500)
                            for j in range(max(0, i - 1), min(args.seq_length, i + 2)):
                                vm[i, j] = score
                else:
                    # Original character-level path, delegated to the KG helper.
                    tokens, pos, vm, _ = kg.add_knowledge_with_vm([text], add_pad=True, max_length=args.seq_length)
                    tokens = tokens[0]
                    pos = pos[0]
                    vm = vm[0].astype(np.float32)

                # All features must share the configured sequence length.
                assert len(tokens) == args.seq_length, f"长度不一致: {len(tokens)} != {args.seq_length}"

                # mask=0 marks padding positions so attention ignores them.
                mask = [1 if t != vocab.get(PAD_TOKEN) else 0 for t in tokens]
                dataset.append((tokens, label, mask, pos, vm))

        except Exception as e:
            print(f"Error processing line {line_id}: {str(e)}")
            print("Problematic line:", line)

    # BUG FIX: guard the summary print -- max()/min() over an empty sequence
    # raised ValueError when no line parsed successfully. (Also removed the
    # dead `emotion_kg` lookup that was computed but never used.)
    if dataset:
        print(f"Max length: {max(len(x[0]) for x in dataset)}, Min length: {min(len(x[0]) for x in dataset)}")
    else:
        print("Warning: no valid examples were produced.")
    return dataset


def batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms):
    """Yield consecutive mini-batches of the five aligned feature sequences.

    Every batch holds ``batch_size`` instances except possibly the last,
    which holds the remainder. ``vms`` may be a plain list; the others are
    tensors — slicing works identically on both.
    """
    total = input_ids.size()[0]
    for start in range(0, total, batch_size):
        end = start + batch_size
        yield (input_ids[start:end],
               label_ids[start:end],
               mask_ids[start:end],
               pos_ids[start:end],
               vms[start:end])


def main():
    """CLI entry point: build the classifier and evaluate it on a test set.

    Scans the labelled TSV test file to discover the label set, builds the
    model, converts the test set to features (optionally KG-augmented),
    then prints accuracy, a confusion matrix and per-label P/R/F1.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Path options.
    parser.add_argument("--pretrained_model_path", default=None, type=str,
                        help="Path of the pretrained model.")
    parser.add_argument("--output_model_path", default="./models/classifier_model.bin", type=str,
                        help="Path of the output model.")
    parser.add_argument("--vocab_path", default="./models/google_vocab.txt", type=str,
                        help="Path of the vocabulary file.")
    # Only the test set is needed: this script evaluates, it does not train.
    parser.add_argument("--test_path", type=str, required=True,
                        help="Path of the testset.")
    parser.add_argument("--config_path", default="./models/google_config.json", type=str,
                        help="Path of the config file.")

    # Model options.
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=256,
                        help="Sequence length.")
    parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
                                                   "cnn", "gatedcnn", "attn", \
                                                   "rcnn", "crnn", "gpt", "bilstm"], \
                                                   default="bert", help="Encoder type.")
    parser.add_argument("--bidirectional", action="store_true", help="Specific to recurrent model.")
    parser.add_argument("--pooling", choices=["mean", "max", "first", "last"], default="first",
                        help="Pooling type.")

    # Subword options.
    parser.add_argument("--subword_type", choices=["none", "char"], default="none",
                        help="Subword feature type.")
    parser.add_argument("--sub_vocab_path", type=str, default="models/sub_vocab.txt",
                        help="Path of the subword vocabulary file.")
    parser.add_argument("--subencoder", choices=["avg", "lstm", "gru", "cnn"], default="avg",
                        help="Subencoder type.")
    parser.add_argument("--sub_layers_num", type=int, default=2, help="The number of subencoder layers.")

    # Tokenizer options.
    parser.add_argument("--tokenizer", choices=["bert", "char", "word", "space"], default="bert",
                        help="Specify the tokenizer." 
                             "Original Google BERT uses bert tokenizer on Chinese corpus."
                             "Char tokenizer segments sentences into characters."
                             "Word tokenizer supports online word segmentation based on jieba segmentor."
                             "Space tokenizer segments sentences into words according to space."
                             )
    parser.add_argument("--tokenizer_path", type=str, default="./custom_words.txt",
                        help="Path to custom dictionary for word tokenizer")

    # Optimizer options.
    parser.add_argument("--learning_rate", type=float, default=2e-5,
                        help="Learning rate.")
    parser.add_argument("--warmup", type=float, default=0.1,
                        help="Warm up value.")

    # Training options.
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="Dropout.")
    parser.add_argument("--epochs_num", type=int, default=5,
                        help="Number of epochs.")
    parser.add_argument("--report_steps", type=int, default=500,
                        help="Specific steps to print prompt.")
    parser.add_argument("--seed", type=int, default=7,
                        help="Random seed.")

    # Evaluation options.
    parser.add_argument("--mean_reciprocal_rank", action="store_true", help="Evaluation metrics for DBQA dataset.")

    # Knowledge-graph options.
    parser.add_argument("--kg_name", required=False, help="KG name or path")
    parser.add_argument("--workers_num", type=int, default=1, help="number of process for loading dataset")
    parser.add_argument("--no_vm", action="store_true", help="Disable the visible_matrix")
    parser.add_argument("--no_kg", action="store_true", help="完全禁用知识图谱")

    args = parser.parse_args()
    args = load_hyperparam(args)
    set_seed(args.seed)

    # Scan the test set once: map header names to column indices and count
    # the number of distinct labels.
    labels_set = set()
    columns = {}
    with open(args.test_path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            try:
                line = line.strip().split("\t")
                if line_id == 0:
                    for i, column_name in enumerate(line):
                        columns[column_name] = i
                    continue
                label = int(line[columns["label"]])
                labels_set.add(label)
            # BUG FIX: the bare `except: pass` silently swallowed every error
            # (including KeyboardInterrupt); only skip genuinely bad rows.
            except (KeyError, ValueError, IndexError):
                pass
    args.labels_num = len(labels_set)

    # Load vocabulary.
    vocab = Vocab()
    vocab.load(args.vocab_path)
    args.vocab = vocab

    args.target = "bert"
    model = build_model(args)
    model = BertClassifier(args, model)

    # NOTE(review): no pretrained weights are loaded here -- the model is
    # evaluated with freshly initialized parameters unless loading from
    # --pretrained_model_path is (re)enabled.

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    def read_dataset(path, workers_num=1):
        """Read TSV lines (skipping the header) and convert them to features."""
        print("Loading sentences from {}".format(path))
        sentences = []
        with open(path, mode='r', encoding="utf-8") as f:
            for line_id, line in enumerate(f):
                if line_id == 0:
                    continue
                sentences.append(line.strip())
        sentence_num = len(sentences)
        print("There are {} sentence in total.".format(sentence_num))

        # Single-process path; kg=None means no knowledge injection here.
        params = (0, sentences, columns, None, vocab, args)
        dataset = add_knowledge_worker(params)
        return dataset

    # Defined inside main() so it closes over `model` and `device`.
    def evaluate(args):
        """Run the model over the test set; print accuracy, confusion, P/R/F1."""
        dataset = read_dataset(args.test_path, workers_num=args.workers_num)
        print("测试集标签分布:", collections.Counter([sample[1] for sample in dataset]))

        input_ids = torch.LongTensor([sample[0] for sample in dataset])
        label_ids = torch.LongTensor([sample[1] for sample in dataset])
        mask_ids = torch.LongTensor([sample[2] for sample in dataset])
        pos_ids = torch.LongTensor([sample[3] for sample in dataset])
        vms = [sample[4] for sample in dataset]

        batch_size = args.batch_size
        instances_num = input_ids.size()[0]
        total_batches = instances_num // batch_size + (1 if instances_num % batch_size != 0 else 0)

        model.eval()

        correct = 0
        # Rows index predictions, columns index gold labels.
        confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)

        with torch.no_grad():
            progress = tqdm(total=total_batches, desc='Evaluating', unit='batch')
            try:
                for i, (input_ids_batch, label_ids_batch, mask_ids_batch, pos_ids_batch, vms_batch) in enumerate(
                        batch_loader(batch_size, input_ids, label_ids, mask_ids, pos_ids, vms)):

                    # BUG FIX: the visible matrix holds float sentiment scores;
                    # torch.LongTensor truncated them to integers. Keep float.
                    vms_batch = torch.FloatTensor(np.array(vms_batch)).to(device)
                    input_ids_batch = input_ids_batch.to(device)
                    label_ids_batch = label_ids_batch.to(device)
                    mask_ids_batch = mask_ids_batch.to(device)
                    pos_ids_batch = pos_ids_batch.to(device)

                    loss, logits = model(input_ids_batch, label_ids_batch, mask_ids_batch, pos=pos_ids_batch,
                                         vm=vms_batch)

                    logits = nn.Softmax(dim=1)(logits)
                    pred = torch.argmax(logits, dim=1)
                    gold = label_ids_batch

                    for j in range(pred.size()[0]):
                        confusion[pred[j], gold[j]] += 1
                    # (pred == gold) is already a tensor; re-wrapping it in
                    # torch.tensor() was redundant and raised a warning.
                    correct += (pred == gold).sum().item()

                    # Use the true number of instances seen so far so the
                    # running accuracy is exact on the final partial batch.
                    seen = min((i + 1) * batch_size, instances_num)
                    progress.update(1)
                    progress.set_postfix({
                        'acc': f'{correct / seen:.2%}',
                        'loss': f'{loss.item():.4f}'
                    })
            except Exception as e:
                print(f"\nEvaluation error: {str(e)}")
                raise
            finally:
                progress.close()

        accuracy = correct / len(dataset)
        print("Accuracy: {:.4f}".format(accuracy))

        print("\nConfusion matrix:")
        print(confusion)

        # Per-label precision, recall and F1 from the confusion matrix.
        for i in range(confusion.size()[0]):
            row_sum = confusion[i, :].sum().item()  # everything predicted as i
            col_sum = confusion[:, i].sum().item()  # everything actually i
            p = confusion[i, i].item() / row_sum if row_sum > 0 else 0
            r = confusion[i, i].item() / col_sum if col_sum > 0 else 0
            f1 = 2 * p * r / (p + r) if p + r > 0 else 0
            print("Label {}: Precision: {:.3f}, Recall: {:.3f}, F1 Score: {:.3f}".format(i, p, r, f1))

        print("Acc. (Correct/Total): {:.4f} ({}/{})".format(accuracy, correct, len(dataset)))

        return accuracy

    evaluate(args)

# Script entry point: run evaluation when invoked directly.
if __name__ == "__main__":
    main()



