import os
import time

import numpy as np
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, get_linear_schedule_with_warmup

from collate import collate_fn, acos_collate_fn
from acoxs_dataset import ACOXSDataset, ACOSDataset
from finetuning_argparse import init_args
from labels import get_aspect_category, get_category_sentiment_num_list
from mrc_model import MRCModel
from tools import get_logger, seed_everything, save_model, print_results
from trainer import ACOXSTrainer

import logging
from colorlog import ColoredFormatter


def get_logger(log_path):
    """Configure and return the root logger with a file handler and a colorized console handler.

    NOTE(review): this local definition shadows the ``get_logger`` imported from
    ``tools`` above — confirm the shadowing is intentional.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    # Reset handlers so repeated calls do not duplicate log output.
    root.handlers = []

    # Plain (uncolored) formatter for the on-disk log file.
    file_handler = logging.FileHandler(log_path, encoding='utf-8')
    file_handler.setFormatter(logging.Formatter(
        fmt='[%(levelname)s] %(asctime)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    ))

    # Colorized formatter for terminal output.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(ColoredFormatter(
        fmt="%(log_color)s[%(levelname)s] %(asctime)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        log_colors={
            'DEBUG':    'purple',  # used as a secondary "info" channel
            'INFO':     'white',
            'WARNING':  'yellow',
            'ERROR':    'red',
            'CRITICAL': 'bold_red,bg_white',
        },
    ))

    # File handler first, then console — same order as the original.
    root.addHandler(file_handler)
    root.addHandler(console_handler)

    return root


def do_train():
    """Train the MRC model on the ACOXS quintuple task, evaluating on the dev set each epoch.

    Reads the module-level ``args``, ``logger``, ``tokenizer`` and ``output_path``
    globals. The checkpoint with the best dev quintuple F1 is saved to ``output_path``.
    Fix: removed the unused ``best_dev_quadruple_f1`` local from the original.
    """
    # ---------- model ----------
    logger.info("Building test Model...")
    category_list = get_aspect_category(args.task, args.data_type)
    args.category_dim = len(category_list[0])

    # Category / sentiment count lists consumed by the model.
    res_lists = get_category_sentiment_num_list(args)
    args.category_num_list = res_lists[0]
    args.sentiment_num_list = res_lists[-1]

    # Base model: BERT-style encoder (Transformers) wrapped by MRCModel.
    model = MRCModel(args, args.category_dim)
    model = model.to(device='cuda' if torch.cuda.is_available() else 'cpu')

    # ---------- data ----------
    train_dataset = ACOXSDataset(tokenizer, args, "train")
    dev_dataset = ACOXSDataset(tokenizer, args, "dev")

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.train_batch_size, shuffle=True,
                                  drop_last=True, collate_fn=collate_fn)
    # Keep args.eval_batch_size modest: a large value can trigger CUDA OOM during eval.
    dev_dataloader = DataLoader(dataset=dev_dataset, batch_size=args.eval_batch_size, collate_fn=collate_fn)

    # ---------- optimizer ----------
    logger.info('initial optimizer......')
    param_optimizer = list(model.named_parameters())
    # Encoder ("_bert") parameters use the optimizer-wide lr (learning_rate2);
    # all remaining (task-head) parameters use learning_rate1.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if "_bert" in n],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if "_bert" not in n],
         'lr': args.learning_rate1,
         'weight_decay': 0.01}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate2)

    # ---------- lr scheduler (linear warmup + decay) ----------
    batch_num_train = len(train_dataset) // args.train_batch_size
    training_steps = args.epoch_num * batch_num_train
    warmup_steps = int(training_steps * args.warm_up)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=training_steps)

    trainer = ACOXSTrainer(logger, model, optimizer, scheduler, tokenizer, args)

    # ---------- training loop ----------
    logger.info("***** Running Training *****")
    best_dev_quintuple_f1 = .0

    for epoch in range(1, args.epoch_num + 1):
        model.train()
        trainer.train(train_dataloader, epoch)

        logger.info("***** Running Dev | Epoch {} *****".format(epoch))
        results = trainer.eval(dev_dataloader)

        # Skip logging/checkpointing for degenerate epochs with zero quintuple F1.
        if results['quintuple']['f1'] == 0:
            continue
        print_results(logger, results)
        if results['quintuple']['f1'] > best_dev_quintuple_f1:
            best_dev_quintuple_f1 = results['quintuple']['f1']
            save_model(output_path, f"{args.data_type}_test", epoch, optimizer, model)
            logger.info("i got the best dev result {}...".format(best_dev_quintuple_f1))
        # Release cached GPU memory between epochs.
        torch.cuda.empty_cache()

    logger.info("***** Train Over *****")
    logger.info("The best dev quintuple f1: {}".format(best_dev_quintuple_f1))


def do_acos_train():
    """Train the MRC model on the ACOS quadruple task and checkpoint the best dev F1.

    Mirrors ``do_train`` but uses ``ACOSDataset`` / ``acos_collate_fn`` and the
    trainer's ``acos_train`` / ``acos_eval`` entry points.
    """
    logger.info("Building test Model...")
    category_list = get_aspect_category(args.task, args.data_type)
    args.category_dim = len(category_list[0])

    # Per-category / per-sentiment counts consumed by the model.
    num_lists = get_category_sentiment_num_list(args)
    args.category_num_list = num_lists[0]
    args.sentiment_num_list = num_lists[-1]

    model = MRCModel(args, args.category_dim)
    model = model.to(device='cuda' if torch.cuda.is_available() else 'cpu')

    train_dataset = ACOSDataset(tokenizer, args, "train")
    dev_dataset = ACOSDataset(tokenizer, args, "dev")

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.train_batch_size, shuffle=True,
                                  drop_last=True, collate_fn=acos_collate_fn, num_workers=0)
    # A large eval batch size may exhaust CUDA memory.
    dev_dataloader = DataLoader(dataset=dev_dataset, batch_size=args.eval_batch_size, collate_fn=acos_collate_fn)

    logger.info('initial optimizer......')
    named_params = list(model.named_parameters())
    # Encoder parameters use the optimizer-wide lr; task heads get learning_rate1.
    grouped = [
        {'params': [p for n, p in named_params if "_bert" in n],
         'weight_decay': 0.01},
        {'params': [p for n, p in named_params if "_bert" not in n],
         'lr': args.learning_rate1,
         'weight_decay': 0.01},
    ]
    optimizer = AdamW(grouped, lr=args.learning_rate2)

    # Linear warmup + decay schedule over the full training run.
    steps_per_epoch = len(train_dataset) // args.train_batch_size
    total_steps = args.epoch_num * steps_per_epoch
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(total_steps * args.warm_up),
        num_training_steps=total_steps,
    )

    trainer = ACOXSTrainer(logger, model, optimizer, scheduler, tokenizer, args)

    logger.info("***** Running Training *****")
    best_dev_quadruple_f1 = .0

    for epoch in range(1, args.epoch_num + 1):
        model.train()
        trainer.acos_train(train_dataloader, epoch)

        logger.info("***** Running Dev | Epoch {} *****".format(epoch))
        results = trainer.acos_eval(dev_dataloader)

        quad_f1 = results['quadruple']['f1']
        # Skip logging/checkpointing for epochs with zero quadruple F1.
        if quad_f1 == 0:
            continue
        print_results(logger, results)
        if quad_f1 > best_dev_quadruple_f1:
            best_dev_quadruple_f1 = quad_f1
            save_model(output_path, f"{args.data_type}_test", epoch, optimizer, model)
            logger.info("i got the best dev result {}...".format(best_dev_quadruple_f1))
        # Release cached GPU memory between epochs.
        torch.cuda.empty_cache()

    logger.info("***** Train Over *****")
    logger.info("The best dev quadruple f1: {}".format(best_dev_quadruple_f1))


# 跑dev_dataset 就是do_eval 跑test_dataset, 就是do_test
def do_eval(model, dev_dataloader):

    trainer = ACOXSTrainer(logger, model, None, None, tokenizer, args)
    # ##########Dev##########
    logger.info("***** Running Dev *****")
    dev_results = trainer.eval(dev_dataloader)
    if args.do_optimized:
        print_results(logger, dev_results)
    else:
        print_results(logger, dev_results)

    return dev_results


def do_test(model, test_dataloader):
    """Evaluate ``model`` on the test dataloader and return the trainer's result dict.

    Fix: the original ``if args.do_optimized`` conditional had byte-identical
    branches (both called ``print_results``), so it was collapsed.
    """
    trainer = ACOXSTrainer(logger, model, None, None, tokenizer, args)
    logger.info("***** Running Test *****")
    test_results = trainer.eval(test_dataloader)
    print_results(logger, test_results)
    return test_results


def do_optimized():
    """Greedily sweep the inference hyperparameter ``alpha`` on the dev set, then run the test set.

    The sweep stops as soon as dev quintuple F1 drops below the best seen so far.
    Fixes vs. original:
    - ``args.alpha`` was set to the list *index* of the best alpha instead of the
      alpha value itself (contradicting the logged ``alpha_list[...]`` value).
    - Removed the unused ``beta_list`` / ``delta_list`` locals.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ---------- model config ----------
    logger.info("Building MRC-CLRI Model...")
    category_list = get_aspect_category(args.task, args.data_type)
    args.category_dim = len(category_list[0])
    res_lists = get_category_sentiment_num_list(args)
    args.category_num_list = res_lists[0]
    args.sentiment_num_list = res_lists[-1]

    # ---------- data ----------
    dev_dataset = ACOXSDataset(tokenizer, args, "dev")
    test_dataset = ACOXSDataset(tokenizer, args, "test")
    dev_dataloader = DataLoader(dataset=dev_dataset, batch_size=args.eval_batch_size, collate_fn=collate_fn)
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.eval_batch_size, collate_fn=collate_fn)

    # SECURITY: torch.load of a full model object unpickles arbitrary code —
    # only load checkpoints from trusted sources.
    model = torch.load(args.checkpoint_path, map_location=device)
    model = model.to(device)

    # Candidate alphas, inclusive of both endpoints, rounded to 2 decimals.
    start, end, step = args.alpha_start, args.alpha_end, args.alpha_step
    num_steps = int(round((end - start) / step)) + 1
    alpha_list = [round(x, 2) for x in np.linspace(start, end, num_steps)]
    print(alpha_list)

    best_f1 = .0
    dev_f1_list = []

    for a in alpha_list:
        args.alpha = a
        logger.debug(f'alpha is {a}, beta is {args.beta}')
        dev_results = do_eval(model, dev_dataloader)

        dev_f1 = dev_results['quintuple']['f1']
        # Greedy early stop: once F1 starts dropping, abandon the sweep.
        if dev_f1 < best_f1:
            break
        best_f1 = dev_f1
        dev_f1_list.append(dev_f1)

    best_dev_f1 = max(dev_f1_list)
    best_idx = dev_f1_list.index(best_dev_f1)
    logger.info(f'The best dev f1:{best_dev_f1}')
    logger.info(f'The best dev alpha:{alpha_list[best_idx]}')

    # BUG FIX: use the best alpha *value*, not its index, for the final test run.
    args.alpha = alpha_list[best_idx]
    test_results = do_test(model, test_dataloader)
    logger.info(f'test result: {test_results}')


def do_inference(reviews):
    """Run extraction inference on raw review strings with a saved checkpoint."""
    logger.info("Building MRC-CLRI Model...")
    category_list = get_aspect_category(args.task, args.data_type)
    args.category_dim = len(category_list[0])

    # Per-category / per-sentiment counts consumed by the model.
    num_lists = get_category_sentiment_num_list(args)
    args.category_num_list = num_lists[0]
    args.sentiment_num_list = num_lists[-1]

    # Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): torch.load of a whole model object unpickles arbitrary code;
    # only load checkpoints from trusted sources.
    model = torch.load(args.checkpoint_path, map_location=device)
    model = model.to(device)

    trainer = ACOXSTrainer(logger, model, None, None, tokenizer, args)
    trainer.inference(reviews)


# Newly added helper.
def add_spaces_to_text(text_list):
    """Return a copy of each string with a single space inserted between adjacent characters."""
    # " ".join iterates the string directly; no need to materialize a char list first.
    return [" ".join(text) for text in text_list]


if __name__ == '__main__':
    # ---------- args ----------
    args = init_args()

    # ---------- reproducibility ----------
    seed_everything(args.seed)

    # ---------- output / log directories ----------
    output_path = os.path.join(args.output_dir, args.task, args.data_type)
    log_path = os.path.join(args.log_dir, args.task, args.data_type)
    # exist_ok avoids the check-then-create race of the original exists() guards.
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(log_path, exist_ok=True)

    # ---------- logger (one timestamped file per run) ----------
    log_path = os.path.join(log_path, time.strftime("%Y-%m-%d %H-%M-%S", time.localtime()) + ".log")
    logger = get_logger(log_path)

    # Record the full run configuration.
    logger.info(args)

    # tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_path)

    if args.do_train:
        do_train()
        # do_acos_train()
    if args.do_optimized:
        do_optimized()
    if args.do_inference:
        # Sample reviews (space-separated Chinese characters). Alternatively read them
        # from ./others/dataProcess/input.txt, one review per line:
        # with open("./others/dataProcess/input.txt", "r", encoding="utf-8") as f:
        #     text = [line.strip() for line in f.readlines()]
        text = [
            '这 款 酸 奶 很 细 腻 ， 入 口 绵 滑 ， 带 有 淡 淡 的 草 莓 香 气 ， 甜 度 适 中 ， 但 是 后 味 有 点 酸 涩 。',
            '红 枣 味 特 别 浓 郁 ， 酸 奶 稍 微 有 点 粘 稠 ， 喝 到 最 后 舌 头 有 点 涩 。',
            '巧 克 力 味 浓 郁 ， 但 酸 奶 有 点 甜 过 头 ， 喝 完 之 后 有 种 腻 腻 的 感 觉 。',
            '蜂 蜜 香 味 特 别 浓 郁 ， 但 柚 子 的 酸 味 有 些 过 头 ， 回 味 稍 苦 。'
        ]
        do_inference(text)
    if args.do_test:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # ---------- model config ----------
        logger.info("Building MRC-CLRI Model...")
        category_list = get_aspect_category(args.task, args.data_type)
        args.category_dim = len(category_list[0])
        res_lists = get_category_sentiment_num_list(args)
        args.category_num_list = res_lists[0]
        args.sentiment_num_list = res_lists[-1]

        test_dataset = ACOXSDataset(tokenizer, args, "test")
        test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.eval_batch_size, collate_fn=collate_fn)

        # NOTE(review): torch.load unpickles arbitrary objects — trusted checkpoints only.
        model = torch.load(args.checkpoint_path, map_location=device)
        model = model.to(device)

        do_test(model, test_dataloader)
