import numpy as np
import multiprocessing
import argparse
import pandas as pd
import time
import math
import warnings
import re
import ast
import time
import csv
from MCTS.score import simplify_eq, score_with_est
from MCTS.spl_base_error_reward import SplBase
from MCTS.spl_task_utils import *
from collections import Counter
import subprocess
import boolean
import math
from torch.utils.tensorboard import SummaryWriter
# Globally silence runtime/future/deprecation warnings so the search logs stay readable.
warnings.filterwarnings("ignore", category=RuntimeWarning) 
warnings.filterwarnings("ignore", category=FutureWarning) 
warnings.filterwarnings("ignore", category=DeprecationWarning) 
class SPL():
    """Monte-Carlo-Tree-Search driver for symbolic (arithmetic or boolean)
    equation discovery.

    Holds the dataset and search hyper-parameters, builds the production-rule
    grammar, and repeatedly runs the ``SplBase`` MCTS agent to discover
    symbolic equations fitting ``(x_train, y_train)``.
    """

    def __init__(
        self,
        x_train = None,
        y_train = None,
        x_test = None,
        y_test = None,
        task = None,
        num_run = None,
        transplant_step = None,
        motif_library = None,
        max_len = None,
        loss_type = None,
        epsilon = None,
        feature_selection = None
    ):
        # Data splits: x_* are 2-D (sample, feature) arrays, y_* are 1-D targets.
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        self.task = task                            # benchmark/task name (used for logging only)
        self.num_run = num_run                      # number of independent MCTS runs
        self.transplant_step = transplant_step      # MCTS iterations between two transplantations
        self.motif_library = motif_library          # optional motif library (placeholder, unused)
        self.max_len = max_len                      # max number of production rules per equation
        self.loss_type = loss_type                  # loss forwarded to score_with_est (e.g. 'mse', 'focal')
        self.epsilon = epsilon                      # tolerance forwarded to the scorer
        self.feature_selection = feature_selection  # 'lut' selects the boolean grammar

    def process(self):
        """Run the full search; return (equations, error rates, success rate)."""
        all_eqs, error_rates, success_rate = self.run_spl()
        return all_eqs, error_rates, success_rate

    def run_spl(self):
        """Build the production-rule grammar for this task and launch the search.

        Returns
        -------
        tuple
            ``(all_eqs, error_rates, success_rate)`` as produced by
            :meth:`multiprocessing_run_spl`.
        """
        input_var = len(self.x_train[0])  # number of input variables/features

        # Production rules: arithmetic grammar by default; for 'lut' a boolean
        # grammar where '(1-A)' acts as NOT and '(A*A)' as AND.
        if self.feature_selection != 'lut':
            grammars = ['A->(A+A)', 'A->(A-A)', 'A->(A*A)', 'A->(A/A)', 'A->exp(A)', 'A->cos(A)', 'A->sin(A)'] + [f'A->x_{i}' for i in range(input_var)]
        else:
            grammars = ['A->(1-A)', 'A->(A*A)'] + [f'A->x_{i}' for i in range(input_var)]

        if self.motif_library:
            # Placeholder: motif-library grammar augmentation is not implemented yet.
            pass
        nt_nodes = ['A']  # non-terminal symbols of the grammar
        # Drop duplicate production rules.  NOTE(review): set() gives no stable
        # ordering, so rule order may vary between runs/processes.
        grammars = list(set(grammars))
        print(grammars)
        args = self.x_train, self.y_train, grammars, nt_nodes, self.num_run, self.transplant_step, self.task, self.max_len, self.epsilon, self.feature_selection
        all_eqs, error_rates, success_rate = self.multiprocessing_run_spl(args)
        return all_eqs, error_rates, success_rate

    def multiprocessing_run_spl(self, args, eta = 0.9999, max_module_init = 10, num_aug = 5, exp_rate = 1/np.sqrt(2),
                                num_transplant = 20, norm_threshold=1e-5, count_success = True):
        """
        Executes the main training loop of Symbolic Physics Learner.

        Parameters
        ----------
        args : tuple
            (x, y, grammars, nt_nodes, num_run, transplant_step, task,
            max_len, epsilon, feature_selection) as packed by ``run_spl``.
        eta : Float object.
            penalty factor for rewarding.
        max_module_init : Int object.
            initial maximum length for module transplantation candidates.
        num_aug : Int object.
            number of trees for module transplantation.
        exp_rate : Float object.
            initial exploration rate.
        num_transplant : Int object.
            number of transplantation candidate updates performed throughout training.
        norm_threshold : Float object.
            numerical error tolerance for norm calculation, a very small value.
        count_success : Boolean object.
            if success rate is recorded.

        Returns
        -------
        all_eqs: List<Str>
            discovered equations (best equation of each run).
        error_rates: List<Float>
            ``1 - test_score`` of the best equation of each run.
        success_rate: float
            fraction of runs whose test score reached ``1 - norm_threshold``.
        """
        x, y, grammars, nt_nodes, num_run, transplant_step, task, max_len, epsilon, feature_selection = args
        # Stack x and y column-wise and transpose: each row is one variable,
        # the last row is the target (layout the scorer consumes).
        train_sample = np.concatenate((x, y.reshape(-1,1)), axis=1).T
        test_sample = train_sample  # NOTE(review): "test" score is computed on the training data itself
        num_success = 0
        all_times = []
        all_eqs = []
        error_rates = []
        # Module max size grows by this amount after each transplantation.
        module_grow_step = (max_len - max_module_init) / num_transplant
        # Fix: keep the overall wall-clock timer separate from the per-run
        # timer; previously the per-run reset made the final "all duration"
        # print report only the last run's time.
        total_start_time = time.time()
        for i_test in range(num_run):
            tb_logger = SummaryWriter(f'runs/num_run_{i_test}')
            print("test", i_test)
            best_solution = ('nothing', 0)  # (equation string, reward)

            exploration_rate = exp_rate
            max_module = max_module_init
            reward_his = []       # reward history (kept for inspection; not otherwise used here)
            best_modules = []
            aug_grammars = []

            start_time = time.time()  # per-run timer, feeds discovery_time
            discovery_time = 0

            for i_itr in range(num_transplant):

                spl_model = SplBase(data_sample = train_sample,
                                    base_grammars = grammars, 
                                    aug_grammars = aug_grammars, 
                                    nt_nodes = nt_nodes, 
                                    max_len = max_len, 
                                    max_module = max_module,
                                    aug_grammars_allowed = num_aug,
                                    func_score = score_with_est, 
                                    loss_type = self.loss_type,
                                    epsilon = epsilon,
                                    feature_selection = feature_selection,
                                    tb_logger = tb_logger,
                                    input_num = len(x[0]),
                                    exploration_rate = exploration_rate, 
                                    eta = eta,
                                    )

                # Run MCTS for transplant_step iterations; good_modules are
                # candidate subtrees used to augment the grammar next round.
                _, current_solution, good_modules, repeat_cnt, replace_cnt = spl_model.run(transplant_step, 
                                                                            num_play=10, 
                                                                            print_flag=True)
                print(f'the good_modules of {i_itr}th num_transplant are', good_modules)

                end_time = time.time() - start_time  # elapsed time of this run so far

                # Merge newly found modules and keep the num_aug highest-reward ones.
                if not best_modules:
                    best_modules = good_modules
                else:
                    best_modules = sorted(list(set(best_modules + good_modules)), key = lambda x: x[1])
                aug_grammars = [x[0] for x in best_modules[-num_aug:]]
                print(f'the aug_grammers of {i_itr}th num_transplant are', aug_grammars)

                reward_his.append(best_solution[1])

                if current_solution[1] > best_solution[1]:
                    best_solution = current_solution
                # Loosen the search after each transplantation round.
                max_module += module_grow_step
                exploration_rate *= 5

                # Score incumbent and current solutions for logging / early stop.
                test_score = score_with_est(best_solution[0], 0, test_sample, self.loss_type, epsilon, feature_selection, eta = eta)[0]
                current_test_score = score_with_est(current_solution[0], 0, test_sample, self.loss_type, epsilon, feature_selection, eta = eta)[0]
                current_focal_reward = score_with_est(current_solution[0], 0, test_sample, 'focal', epsilon, feature_selection, eta=eta)[0]
                current_mse_reward = score_with_est(current_solution[0], 0, test_sample, 'mse', epsilon, feature_selection, eta=eta)[0]
                tb_logger.add_scalar(f'num_run_{i_test}_current focal score', current_focal_reward, global_step=i_itr)
                tb_logger.add_scalar(f'num_run_{i_test}_current mse score', current_mse_reward, global_step=i_itr)
                tb_logger.add_scalar(f'num_run_{i_test}_current test score', current_test_score, global_step=i_itr)
                # Deferred import, presumably to avoid a circular dependency — TODO confirm.
                from o3_TPAMI_train import Trainer
                top_k_acc, y_prediction = Trainer.evaluate_baseline(current_solution[0], self.x_test, self.y_test, search_method='MCTS', feature_selection=feature_selection)
                for i in range(9):
                    tb_logger.add_scalar(f'num_run_{i_test}_top_{i+1}0%_acc', top_k_acc[i], global_step=i_itr)
                tb_logger.add_histogram(f'num_run_{i_test}_y_prediction', y_prediction, global_step=i_itr)
                # Early stop once the solution is numerically perfect.
                if test_score >= 1 - norm_threshold:
                    num_success += 1
                    if discovery_time == 0:
                        discovery_time = end_time
                        all_times.append(discovery_time)
                    break
            error_rates.append(1-test_score)
            all_eqs.append(best_solution[0])
            print('\n{} tests complete after {} iterations.'.format(i_test+1, i_itr+1))
            print('best solution: {}'.format(best_solution[0]))
            print('test score: {}'.format(test_score))
            # Fix: release the per-run SummaryWriter's file handles.
            tb_logger.close()
        end_time = time.time()
        print('file name is', task)
        # Fix: report the total duration across all runs (and fix 'duartion' typo).
        print('MCTS all duration time is {}'.format(end_time - total_start_time))
        success_rate = num_success / num_run
        if count_success:
            print('success rate :', success_rate)
        # Fix: the two messages were swapped — all-equal means the runs agree.
        if all([all_eqs[0] == all_eq for all_eq in all_eqs]):
            print('num run is same')
        else:
            print('num_run is different')
        return all_eqs, error_rates, success_rate

    def get_complexity(self, eq):
        """Return a complexity score for equation string *eq*.

        Counts '+', '*', '!' and variable occurrences ('x'), then discounts
        repeated parenthesised sub-expressions so each distinct sub-expression
        is charged only once (presumably because repeated sub-trees can be
        shared in the synthesized circuit — TODO confirm).
        """
        expr_list = re.findall(r'\([^()]+\)', eq)  # innermost parenthesised sub-expressions
        complexity = eq.count('+') + eq.count('*') + eq.count('!') + eq.count('x') - sum([(key.count('*') + key.count('+') + key.count('!') + key.count('x')) * (frequency-1) for key, frequency in Counter(expr_list).items()])
        return complexity

    def _write_eqn_file(self, path, eq, input_num):
        """Write *eq* to *path* in ABC's .eqn format with inputs x_0..x_{n-1}
        and a single output F_0."""
        with open(path, 'w') as f:
            f.writelines('INORDER = ' + ''.join([f'x_{k} ' for k in range(input_num)]) + ';' + '\n')
            f.writelines('OUTORDER = ' + 'F_0' + ';' + '\n')
            f.write('F_0' + '=' + eq + ';' + '\n')

    def _count_and_gates(self, path):
        """Run the external ABC tool on the .eqn file at *path* and parse the
        AND-gate count out of its `print_stats` output."""
        # NOTE(review): shell=True with an interpolated path — paths are
        # internally generated here, but keep them free of shell metacharacters.
        command_truth = f"/yqbai/boolfuncgen/motivations/abc -c 'read_eqn {path}; strash; print_stats;'"
        output = subprocess.check_output(command_truth, shell=True)
        return int(re.search(r'and\s+=\s+(\d+)', output.decode('utf-8')).group(1))

    def get_nd(self, init_eq, input_num, symbol, factor_variable_list):
        """Return ``(init_nd, optimized_nd)``: ABC AND-gate counts for the raw
        equation and for its boolean-algebra-simplified form.

        Writes temporary .eqn files and shells out to ABC.  Returns (0, 0)
        for the sentinel equation 'nothing'.
        """
        if init_eq == 'nothing':
            return 0, 0
        init_eq = self.convert_logic_string(init_eq)
        # NOTE(review): self.output_index is never assigned in __init__ —
        # confirm where it is expected to be set before calling this method.
        factor_tag = "_".join([str(factor_variable) for factor_variable in factor_variable_list])
        temp_eqn_file_path = f'./temp_init_eq_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}_every_num_run.txt'
        self._write_eqn_file(temp_eqn_file_path, init_eq, input_num)
        init_nd = self._count_and_gates(temp_eqn_file_path)
        # Simplify with boolean.py, then map its operators back to eqn syntax.
        algebra = boolean.BooleanAlgebra()
        optimized_eq = algebra.parse(init_eq, simplify=True)
        optimized_eq = self.convert_logic_string(str(optimized_eq))
        temp_eqn_file_path = f'./temp_optimized_eq_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}_every_num_run.txt'
        self._write_eqn_file(temp_eqn_file_path, optimized_eq, input_num)
        optimized_nd = self._count_and_gates(temp_eqn_file_path)
        return init_nd, optimized_nd

    def convert_logic_string(self, s):
        """Convert boolean.py operators (~ & |) to .eqn operators (! * +)."""
        s = s.replace("~", "!")
        s = s.replace("&", "*")
        s = s.replace("|", "+")
        return s

    def save_best_eq_every_num_run(self, init_eq, symbol, num_run, input_num, factor_variable_list):
        """Persist the best equation of one run (raw and boolean-simplified)
        as ABC .eqn files for later inspection."""
        init_eq = self.convert_logic_string(str(init_eq))
        factor_tag = "_".join([str(factor_variable) for factor_variable in factor_variable_list])
        temp_eqn_file_path = f'./best_init_eq_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}_num_run_{num_run}.txt'
        self._write_eqn_file(temp_eqn_file_path, init_eq, input_num)
        algebra = boolean.BooleanAlgebra()
        optimized_eq = algebra.parse(init_eq, simplify=True)
        optimized_eq = self.convert_logic_string(str(optimized_eq))
        temp_eqn_file_path = f'./best_optimized_eq_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}_num_run_{num_run}.txt'
        self._write_eqn_file(temp_eqn_file_path, optimized_eq, input_num)

    def backend_optimize(self, error_rates, all_eqs, x, symbol, factor_variable_list):
        """Evaluate every discovered equation with ABC, record raw vs.
        simplified AND counts and complexities to a CSV, and among the
        equations with the minimum error rate return the one whose simplified
        circuit uses the fewest AND gates.

        Returns
        -------
        tuple
            ``(min_error_rate, best_eq)``
        """
        input_num = int(math.log2(len(x)))  # truth tables have 2**input_num rows
        min_error_rate = np.min(error_rates)
        min_indices = np.where(error_rates == min_error_rate)[0]
        print('min_indicies are', min_indices)
        best_eqs = [all_eqs[i] for i in min_indices]
        factor_tag = "_".join([str(factor_variable) for factor_variable in factor_variable_list])
        csv_path = f'./temp_expr_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}.csv'
        with open(csv_path, 'w') as csv_file:
            writer = csv.writer(csv_file)
            # Write the header row.
            writer.writerow(['init_expr', 'optimized_expr', 'init_and', 'optimized_and', 'init_complexity', 'optimized_complexity', 'error_rate'])
        init_eqs = []
        optimized_eqs = []
        best_eqs_and = []
        init_eqs_and = []
        optimized_eqs_and = []
        init_eqs_complexity = []
        optimized_eqs_complexity = []
        for i in range(len(all_eqs)):
            # Raw equation: convert operators, write .eqn, count ANDs.
            init_eq = self.convert_logic_string(str(all_eqs[i]))
            temp_eqn_file_path = f'./temp_init_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}.txt'
            self._write_eqn_file(temp_eqn_file_path, init_eq, input_num)
            init_eqs.append(init_eq)
            init_eqs_and.append(self._count_and_gates(temp_eqn_file_path))
            init_eqs_complexity.append(self.get_complexity(init_eq))
            # Boolean-algebra-simplified equation: same pipeline.
            algebra = boolean.BooleanAlgebra()
            optimized_eq = algebra.parse(all_eqs[i], simplify=True)
            optimized_eq = self.convert_logic_string(str(optimized_eq))
            temp_eqn_file_path = f'./temp_optimized_expr_output_index_{self.output_index}_symbol_{symbol}_factor_list_{factor_tag}.txt'
            self._write_eqn_file(temp_eqn_file_path, optimized_eq, input_num)
            optimized_and = self._count_and_gates(temp_eqn_file_path)
            optimized_eqs.append(optimized_eq)
            optimized_eqs_and.append(optimized_and)
            optimized_eqs_complexity.append(self.get_complexity(optimized_eq))
            # Track AND counts of the minimum-error equations for tie-breaking.
            if error_rates[i] == min_error_rate:
                best_eqs_and.append(optimized_and)
        with open(csv_path, 'a') as csv_file:
            writer = csv.writer(csv_file)
            for init_eq, optimized_eq, init_eq_and, optimized_eq_and, init_eq_complexity, optimized_eq_complexity, error_rate in zip(init_eqs, optimized_eqs, init_eqs_and, optimized_eqs_and, init_eqs_complexity, optimized_eqs_complexity, error_rates):
                writer.writerow([init_eq, optimized_eq, init_eq_and, optimized_eq_and, init_eq_complexity, optimized_eq_complexity, error_rate])
        print('best_eqs_and are', best_eqs_and)
        # Tie-break minimum-error equations by smallest simplified AND count.
        best_eq = best_eqs[best_eqs_and.index(min(best_eqs_and))]
        return min_error_rate, best_eq
    
if __name__ == "__main__":
    parser = argparse.ArgumentParser('SPL')
    parser.add_argument('--benchmark_path', default='/yqbai/boolfuncgen/benchmark/iwls2023/very_small_truth', type=str)
    parser.add_argument('--task', default='truth', type=str)
    parser.add_argument('--save_name', default='iwls2023_very_small_truth_motif_SPL', type=str)
    parser.add_argument('--num_run', default=5, type=int)
    parser.add_argument('--transplant_step', default=10000, type=int)
    args = parser.parse_args()
    # Fix: the arguments were previously passed positionally and landed on the
    # wrong SPL.__init__ parameters (benchmark_path -> x_train, task -> y_train,
    # save_name -> x_test, num_run -> y_test, transplant_step -> task).
    # Bind them by keyword instead so each value reaches its intended field.
    # NOTE(review): x_train/y_train are still not populated here — the truth
    # tables under args.benchmark_path must be loaded and passed in before
    # process() can run end-to-end; confirm the intended loader.
    spl = SPL(
        task=args.task,
        num_run=args.num_run,
        transplant_step=args.transplant_step,
    )
    spl.process()
