import os
import re
import time
import shutil
from tqdm import tqdm
import random
import logging
import argparse

import numpy as np

import json

def parse_args(args=None):
    """Parse command-line arguments for relation-data generation.

    Args:
        args: optional list of argument strings; when None, argparse
            falls back to ``sys.argv[1:]`` automatically.

    Returns:
        argparse.Namespace with the parsed options.
    """
    server_list = ["234-2", "cluster"]
    parser = argparse.ArgumentParser(description="Arguments for relation generation.")
    parser.add_argument('location', choices=server_list,
                        help='Indicate the server this script is running on.')
    parser.add_argument('--data_dir', required=True, nargs="+", type=str,
                        help="Path to data to process. Multiple paths can be specified. (absolute)")
    parser.add_argument("--train_dir", default=None, type=str,
                        help="Path to where the training data is saved. (absolute)")
    parser.add_argument("--eval_dir", default=None, type=str,
                        help="Path to where the evaluation data is saved. (absolute)")
    parser.add_argument('--save_interval', type=int, required=True,
                        help="Save interval.")
    parser.add_argument('--seed', type=int, required=True,
                        help="Random seed.")
    parser.add_argument('--dev_ratio', type=float,
                        help="Ratio of eval data to all data.")
    parser.add_argument("--dev_bound", nargs=2, type=int,
                        help="Specify lower and upper bound for the number of dev examples."
                        "(use -1 to set no bound for corresponding side.)")

    # parse_args(None) already falls back to sys.argv, so no branching needed.
    return parser.parse_args(args)

def get_files(root_dir, arg_name):
    """Collect paths of all ``*jsonlines`` files directly under ``root_dir``.

    Args:
        root_dir: directory to scan (non-recursive).
        arg_name: label used in error messages to identify which argument
            supplied this path.

    Returns:
        List of joined file paths whose names end in "jsonlines".

    Raises:
        ValueError: if ``root_dir`` is not a directory, or it contains no
            jsonlines files (the original silently returned an empty list
            when the directory was non-empty but held no jsonlines files).
    """
    if not os.path.isdir(root_dir):
        raise ValueError(f"Invalid directory for {arg_name}")
    files = [os.path.join(root_dir, filename)
             for filename in os.listdir(root_dir)
             if filename.endswith("jsonlines")]
    # Check AFTER filtering so "no usable data" is reported either way.
    if not files:
        raise ValueError(f"No data found under {arg_name}.")
    return files

def setup_training(args):
    """Resolve the (dev_files, train_files) pair for the requested mode.

    Training mode loads train files and, when ``eval_interval`` is positive,
    dev files as well; eval-only mode loads just the dev files. Any other
    mode is unsupported.
    """
    if args.do_train:
        # Resolve train files first so a bad train_dir is reported before
        # any eval_dir problem, matching the original error order.
        train_files = get_files(args.train_dir, "train_dir")
        dev_files = (get_files(args.eval_dir, "eval_dir")
                     if args.eval_interval > 0 else None)
        return dev_files, train_files
    if args.do_eval:
        return get_files(args.eval_dir, "eval_dir"), None
    raise NotImplementedError

def check_save_path(save_path, forced_write=False):
    """Ensure ``save_path`` ends up as an existing, usable directory.

    An existing non-empty directory is wiped when ``forced_write`` is True;
    otherwise a ValueError is raised. The directory is (re)created when it
    does not exist afterwards.
    """
    already_populated = os.path.isdir(save_path) and os.listdir(save_path)
    if already_populated:
        if not forced_write:
            raise ValueError(f"Warning: save path {save_path} is not empty. Exit.")
        shutil.rmtree(save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Final sanity check: the path must now be a directory (e.g. not a file).
    if not os.path.isdir(save_path):
        raise ValueError(f"Invalid save path {save_path}. Exit.")

class RelationDataset:
    """In-memory jsonlines dataset with a dev/train split and sharded saving.

    Loads every example from ``file_list`` eagerly, computes how many
    examples go to the dev split (ratio clamped by optional bounds), and can
    write both splits back out as fixed-size jsonlines shards.
    """

    def __init__(self, file_list, args):
        """Load examples and compute the dev-split size.

        Args:
            file_list: paths of jsonlines files to load.
            args: parsed CLI namespace; reads train_dir, eval_dir,
                save_interval, dev_ratio, and dev_bound.
        """
        self.train_dir = args.train_dir
        self.eval_dir = args.eval_dir
        self.save_interval = args.save_interval
        self.file_list = file_list
        self.examples = []
        self.load_examples()
        # Both --dev_ratio and --dev_bound are optional CLI args; guard the
        # None defaults so the arithmetic below cannot raise TypeError.
        self.dev_ratio = args.dev_ratio if args.dev_ratio is not None else 0.0
        if args.dev_bound is not None:
            self.dev_lo, self.dev_hi = args.dev_bound
        else:
            self.dev_lo, self.dev_hi = -1, -1  # -1 means "no bound"
        total_count = len(self.examples)
        dev_count = total_count * self.dev_ratio
        if self.dev_lo > 0 and dev_count < self.dev_lo:
            dev_count = self.dev_lo
        if self.dev_hi > 0 and dev_count > self.dev_hi:
            dev_count = self.dev_hi
        self.dev_count = int(dev_count)

        # Shard filename pattern; formatted with the cumulative example count.
        self.stc_template = "data_cnt_{:08d}.jsonlines"

    def load_examples(self):
        """Read every line of every file in ``file_list`` as one JSON example."""
        self.examples = []
        for file_path in tqdm(self.file_list):
            with open(file_path, "r") as f:
                for line in f:
                    example = json.loads(line.strip())
                    self.examples.append(example)
        example_num = len(self.examples)
        print(f"Loaded examples: {example_num} ")

    def shuffle_examples(self):
        """Shuffle examples in place (uses the globally seeded RNG)."""
        random.shuffle(self.examples)

    def get_train_eval_data(self):
        """Return ``(eval_examples, train_examples)`` split at ``dev_count``."""
        return self.examples[:self.dev_count], self.examples[self.dev_count:]

    def save_data(self):
        """Write the dev slice to eval_dir and the rest to train_dir as shards.

        Each shard holds up to ``save_interval`` examples; shard filenames
        encode the cumulative (global) end index, so train shards continue
        the numbering after the dev slice.
        """
        def save_part(lo, hi, step, root):
            # Write examples[lo:hi] in chunks of `step` lines per file.
            for start in tqdm(range(lo, hi, step)):
                end = min(start + step, hi)
                save_path = os.path.join(root, self.stc_template.format(end))
                with open(save_path, 'w') as f:
                    for example in self.examples[start:end]:
                        f.write(json.dumps(example) + '\n')

        step = self.save_interval
        save_part(0, self.dev_count, step, self.eval_dir)
        save_part(self.dev_count, len(self.examples), step, self.train_dir)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        return self.examples[index]

def main():
    """Entry point: load all jsonlines data, shuffle, and shard into train/eval dirs."""
    args = parse_args()
    random.seed(args.seed)
    np.random.seed(args.seed)
    # --train_dir/--eval_dir default to None; fail with a clear message
    # instead of letting os.path.isdir(None) raise a TypeError.
    if args.train_dir is None or args.eval_dir is None:
        raise ValueError("Both --train_dir and --eval_dir must be specified.")
    check_save_path(args.train_dir)
    check_save_path(args.eval_dir)
    files = []
    for data_path in args.data_dir:
        # Use the path itself as the label shown in get_files error messages.
        files.extend(get_files(data_path, data_path))

    dataset = RelationDataset(files, args)
    dataset.shuffle_examples()
    dataset.save_data()
    

# Script entry guard: only run when executed directly, not on import.
if __name__ == "__main__":
    main()