import argparse
import random

import numpy as np
import torch
from torch.utils.data import DataLoader
from utils.raw_data1 import RawData, generate_test_data_from_raw_data
from transformers import BertTokenizer
from utils.logger import Logger, global_time_str
from utils import get_current_timestamp
from utils.dataset import TrainDataSet, TestDataSet
from utils.dataloader import TestDataLoader, build_collate_fn
from trainer1 import train
import os

# Command-line configuration and logger setup for the ASTE MRC-style
# training script. Runs at import time (module level).
parser = argparse.ArgumentParser()

parser.add_argument('--mode', type=str, choices=["train", "dev", "test"], default='train')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--device', type=str, default="cuda:0" if torch.cuda.is_available() else "cpu")
# Shorter default epoch count on CPU-only machines (debug runs).
parser.add_argument('--train_epochs', type=int, default=90 if torch.cuda.is_available() else 4)
parser.add_argument('--seed', type=int, default=2025)
parser.add_argument('--data_dir', type=str, default='../Dataset/ASTE-Data-V2/14res')  # dataset path
parser.add_argument('--cache_path', type=str, default="../main_cache", help='pretrained bert model cache path')
parser.add_argument('--bert_type', type=str, default="bert-base-uncased", )
parser.add_argument("--lr", type=float, default=9e-5)
parser.add_argument('--use_query', type=int, default=1)  # whether to use the natural-language query (1) or not (0)
parser.add_argument('--use_context_augmentation', type=int, default=1)  # -1: no augmentation, 0: linear, 1: exponential
parser.add_argument('--a', type=float, default=1)  # inference parameter
parser.add_argument('--a_ww', type=float, default=0.5)
parser.add_argument('--b', type=float, default=11)
parser.add_argument('--b_ww', type=float, default=1)
parser.add_argument('--use_c', type=int, default=1)
parser.add_argument('--max_length', type=int, default=100)

args = parser.parse_args()

result_save_dir = '../LK-MRC-code/result'

args.model_dir = f"{result_save_dir}/COMMRC-{args.seed}"
result_parent_dir_path = f"{result_save_dir}/COMMRC-{args.seed}"
if os.path.exists(result_parent_dir_path):
    # NOTE(review): only when the per-seed directory already exists does the
    # run get a unique timestamped/parameterized log sub-directory; the very
    # first run for a seed logs into the parent directory itself. Confirm
    # this asymmetry is intentional.
    result_parent_dir_path = f"{result_save_dir}/COMMRC-{args.seed}/{global_time_str}-{args.a}-{args.a_ww}-{args.b}-{args.b_ww}"
    pass
logger_class = Logger(filepath=result_parent_dir_path)
logger = logger_class.logger


def seed_everything(seed):
    """Seed all RNGs used by this script for reproducibility.

    Covers Python's `random`, NumPy, and PyTorch (CPU plus current and
    all CUDA devices; the CUDA calls are safe no-ops without a GPU).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


if __name__ == '__main__':
    seed_everything(args.seed)

# The MRC query text prepended to every sentence; empty when --use_query is 0.
args.text = (
    '' if args.use_query == 0
    else 'Find the first aspect term and corresponding opinion term in the text'
)

local_bert_model_path = "bert_model/" + args.bert_type
if __name__ == '__main__':
    logger.info(f"local_bert_model_path is {local_bert_model_path}")
bert_tokenizer = BertTokenizer.from_pretrained(local_bert_model_path)
# Encoded query length, including the leading [CLS] (101) and trailing [SEP] (102).
args.sen_pre_len = len(bert_tokenizer.encode(args.text))
if __name__ == '__main__':
    logger.info(f"args.sen_pre_len is set as {args.sen_pre_len}")

# All arguments are final now — log every parsed/derived value.
if __name__ == '__main__':
    for arg_key, arg_val in vars(args).items():
        logger.info(f"args.{arg_key} = {arg_val}")


def _load_eval_split(filename):
    """Load one evaluation split (dev or test) from `args.data_dir`.

    Returns a tuple of (golden triplet sets for evaluation, TestDataLoader
    over the split). Extracted because dev and test previously duplicated
    the same four-step loading sequence verbatim.
    """
    raw_data = RawData(f'{args.data_dir}/{filename}', tokenizer=bert_tokenizer, args=args)
    golden_sets = generate_test_data_from_raw_data(raw_data)
    data_set = TestDataSet(raw_data, bert_tokenizer)
    data_loader = TestDataLoader(data_set, args=args, tokenizer=bert_tokenizer)
    return golden_sets, data_loader


if __name__ == '__main__':
    logger.info(f"__main__(): Start loading data instances! Current time: {get_current_timestamp()}")

    # Training split: shuffled batches built with the project's collate fn.
    train_raw_data = RawData(f'{args.data_dir}/train_triplets.txt', tokenizer=bert_tokenizer, args=args)
    train_data_set = TrainDataSet(train_raw_data, bert_tokenizer)
    train_data_loader = DataLoader(
        dataset=train_data_set,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=build_collate_fn(bert_tokenizer, args)
    )

    # Dev and test splits share identical loading logic.
    three_golden_set_dev, dev_data_loader = _load_eval_split('dev_triplets.txt')
    three_golden_set_test, test_data_loader = _load_eval_split('test_triplets.txt')

    # Start training / dev / test (mode is dispatched inside train()).
    logger.info(f"__main__(): {args.mode} start at {get_current_timestamp()}")
    train(
        args=args,
        train_data_loader=train_data_loader,
        dev_data_loader=dev_data_loader,
        test_data_loader=test_data_loader,
        three_golden_set_dev=three_golden_set_dev,
        three_golden_set_test=three_golden_set_test,
        logger=logger
    )
    logger.info(f'__main__(): End at {get_current_timestamp()}')
