import argparse, time, torch, random, numpy as np

from model.comodel import ComModel
from utils.raw_data import RawData
from utils.dataloader import MyDataLoader, SentenceInstance
from fastNLP import cache_results
from transformers import BertTokenizer
from utils.logger import Logger, global_time_str
from trainer import train, test
import os


def seed_everything(seed):
    """Seed every RNG source used here (stdlib, NumPy, torch CPU/GPU) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # CUDA seeding is a no-op without a GPU, so it is safe to call unconditionally.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


# Command-line configuration for training/evaluating the COM-MRC model.
parser = argparse.ArgumentParser()

parser.add_argument('--mode', type=str, choices=["train", "test"],
                    default='train')
# Smaller batch/epoch defaults on CPU so a debug run without a GPU stays fast.
parser.add_argument('--batch_size', type=int, default=8 if torch.cuda.is_available() else 4)
parser.add_argument('--device', type=str, default="cuda:0" if torch.cuda.is_available() else "cpu")
parser.add_argument('--train_epochs', type=int, default=100 if torch.cuda.is_available() else 4)
parser.add_argument('--seed', type=int, default=2025)
parser.add_argument('--data_dir', type=str,
                    default='../Dataset/ASTE-Data-V2/14res')  # dataset path
parser.add_argument('--cache_path', type=str,
                    default="../main_cache",
                    help='pretrained bert model cache path')
parser.add_argument('--bert_type', type=str, default="bert-base-uncased", )
parser.add_argument("--lr", type=float, default=9e-5)
parser.add_argument('--use_query', type=int, default=1)  # whether to prepend the query text (0 disables it)
parser.add_argument('--use_context_augmentation', type=int, default=1)  # -1: no augmentation, 0: linear, 1: exponential
# Inference hyper-parameters consumed by trainer/model.
# NOTE(review): meaning of the a/b and *_ww pairs is not visible here — confirm against trainer.
parser.add_argument('--a', type=float, default=5)  # inference parameter
parser.add_argument('--a_ww', type=float, default=0.5)
parser.add_argument('--b', type=float, default=11)
parser.add_argument('--b_ww', type=float, default=1)
parser.add_argument('--use_c', type=int, default=1)
parser.add_argument('--max_length', type=int, default=100)

args = parser.parse_args()

save_dir = '../COM-MRC-code/result'

# Logs/models live in a per-seed directory; if a previous run already created
# it, this run's logs are nested under a timestamped, parameter-tagged subdir.
args.model_dir = f"{save_dir}/COMMRC-{args.seed}"
file_path = f"{save_dir}/COMMRC-{args.seed}"
if os.path.exists(file_path):
    file_path = f"{save_dir}/COMMRC-{args.seed}/{global_time_str}-{args.a}-{args.a_ww}-{args.b}-{args.b_ww}"
logger_class = Logger(filename='COMMRC', filepath=file_path)
logger = logger_class.logger
seed_everything(args.seed)

tokenizer = BertTokenizer.from_pretrained(args.bert_type, cache_dir=args.cache_path)

# MRC-style query prepended to every sentence; emptied when --use_query == 0.
args.text = 'Find the first aspect term and corresponding opinion term in the text'  # 14 — presumably the encoded token count incl. specials; confirm
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))  # NOTE(review): 'now' appears unused in this file
if args.use_query == 0:
    args.text = ''  # 2 — presumably just [CLS]/[SEP]; confirm
# Number of tokens the query prefix occupies after BERT encoding.
args.sen_pre_len = len(tokenizer.encode(args.text))


def _build_instances(raw_data):
    """Wrap every sentence of *raw_data* as a tokenized SentenceInstance."""
    return [
        SentenceInstance(data, tokenizer, args.max_length, plus_text=args.text)
        for data in raw_data
    ]


def _golden_bundle(raw_data):
    """Collect the gold-standard evaluation sets for one data split.

    Layout (consumed positionally by trainer.train/test, do not reorder):
    [aspect set, opinion set,
     (all triplets, multi triplets, single triplets, multi-aspect ids),
     pair set, aspect-sentiment set, multi-aspect ids, triplets dict]
    """
    return [
        raw_data.get_all_aspect_set(),
        raw_data.get_all_opinion_set(),
        (raw_data.all_triplets, raw_data.all_multi_triplets,
         raw_data.all_single_triplets, raw_data.multi_aspect_id),
        raw_data.get_all_pair_set(),
        raw_data.get_all_as_set(),
        # multi_aspect_id appears both inside the tuple above and here —
        # kept duplicated for backward compatibility with the consumers.
        raw_data.multi_aspect_id,
        raw_data.get_triplets_dict(),
    ]


# @cache_results(
#     f"./caches/{args.data_dir[-5:]}.pkl",_refresh=False
# )
def load_data_instances():
    """Load train/dev/test splits from args.data_dir.

    Returns:
        (train_instances, dev_instances, test_instances,
         dev_golden_bundle, test_golden_bundle)
        where each *_instances is a list of SentenceInstance and each
        *_golden_bundle follows the layout documented in _golden_bundle.
    """
    raw_train_data = RawData(f'{args.data_dir}/train_triplets.txt')
    raw_dev_data = RawData(f'{args.data_dir}/dev_triplets.txt')
    raw_test_data = RawData(f'{args.data_dir}/test_triplets.txt')

    return (
        _build_instances(raw_train_data),
        _build_instances(raw_dev_data),
        _build_instances(raw_test_data),
        _golden_bundle(raw_dev_data),
        _golden_bundle(raw_test_data),
    )


# Build the data pipeline and launch training with dev/test gold sets for evaluation.
instances_train, instances_dev, instances_test, three_golden_set_dev, three_golden_set_test = load_data_instances()
train_data_loader = MyDataLoader(instances_train, tokenizer, args)
# Dev/test loaders are created with is_test=True — presumably disables
# shuffling/augmentation; confirm in MyDataLoader.
dev_data_loader = MyDataLoader(instances_dev, tokenizer, args, is_test=True)
test_data_loader = MyDataLoader(instances_test, tokenizer, args, is_test=True)
train(args, train_data_loader, dev_data_loader, test_data_loader, three_golden_set_dev, three_golden_set_test, logger)

# model = comModel(args).to(args.device)
# test(args,model, test_data_loader, three_goden_set_test, logger,
#      is_test=True,
#      model_dir='***.pth')
