# -*- coding: utf-8 -*-
import datetime
from pathlib import Path
from abc import ABC, abstractmethod
import pandas as pd
from gwlsa import models, datasets
from utils.general_utils import timer


class ProcessingContext:
    """Carrier object passed along a ProcessingChain.

    Attributes:
        data: the pipeline payload (in this module, a dict of datasets/model,
              or None before construction).
        params: free-form configuration keyword arguments.
    """

    def __init__(self, data, **params):
        self.data = data
        self.params = params

    def __str__(self) -> str:
        # Bug fix: the original returned the accumulator list itself, so
        # str()/print() on a context raised TypeError. Join into one string,
        # and return "" when data is absent or not a dict (original showed
        # nothing in that case either).
        parts = []
        if isinstance(self.data, dict):
            parts.append(str(self.data))
            parts.append(str(self.params))
        return "\n".join(parts)

    def __repr__(self) -> str:
        return self.__str__()

class ProcessingStrategy(ABC):
    """Interface for one step of a ProcessingChain.

    Concrete subclasses transform the incoming context and hand back the
    (possibly replaced) context for the next step.
    """

    @abstractmethod
    def process(self, context):
        """Transform *context* and return the context to pass downstream."""
        # Default behaviour for subclasses that call super(): pass-through.
        return context

class ProcessingChainBuilder:
    """Fluent builder that collects strategies and produces a ProcessingChain."""

    def __init__(self):
        # Strategies are executed later in the order they were added.
        self.strategies = []

    def add_strategy(self, strategy):
        """Append *strategy* to the pipeline; returns self for chaining."""
        self.strategies.append(strategy)
        return self

    def build(self):
        """Materialize the collected strategies into a ProcessingChain."""
        return ProcessingChain(self.strategies)

class ProcessingChain:
    """Runs an ordered sequence of strategies, threading a context through them."""

    def __init__(self, strategies):
        self.strategies = strategies

    def process(self, context):
        """Feed *context* through every strategy in order; return the final context."""
        result = context
        for stage in self.strategies:
            result = stage.process(result)
        return result

class Net_constructor(ProcessingStrategy):
    """Build stage: create the run output folder, snapshot the settings source,
    initialize train/val/test datasets, and construct the GNNWR model.

    Reads many keys from ``context.params`` (data paths, column names, training
    hyper-parameters), writes derived paths (``log_save_path``, ``loss_pic_path``,
    ``out_path``, ``f_name``) back into params, and returns a new
    ProcessingContext whose data dict holds the datasets and the model.
    """
    def process(self, context):
        context = super().process(context)

        # Timestamped output folder, e.g. 'gwlsa20240101_120000', under train_outputs.
        out_folder = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
        out_folder = 'gwlsa' + out_folder

        base_path = Path(context.params['train_outputs'])
        resolved_base_path = base_path.resolve()
        out_path = resolved_base_path / out_folder

        # Write a copy of the current settings source into the output folder
        # (for reproducibility of this run).
        # NOTE: f_name is a second timestamp taken slightly after out_folder's,
        # so the two may differ by a second.
        f_name = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
        write_src_to_txt(out_path, f_name)
        log_file_path = out_path / f'{f_name}.log'
        loss_pic_path = out_path / f'{f_name}_loss.png'

        # Project API: splits/loads the data into three dataset objects.
        train_dataset, val_dataset, test_dataset = datasets.init_dataset(data_dir=context.params['data_load_dir'],
                                                                         x_column=context.params['x_column_names'],
                                                                         y_column=[context.params['y_column_name']],
                                                                         spatial_column=context.params['spatial_column_names'],
                                                                         batch_size=context.params['batch_size'],
                                                                         max_val_size=context.params['max_val_size'],
                                                                         max_test_size=context.params['max_test_size'],
                                                                         id_column=context.params['id_column'],
                                                                         buffer_distance=context.params['max_distance'],
                                                                         resolution=context.params['resolution'],
                                                                         num_works=context.params['num_workers']
                                                                         )

        # Construct the model; optimizer_params are hard-coded scheduler bounds —
        # TODO confirm whether these should come from params instead.
        gnnwr = models.GNNWR(train_dataset, val_dataset, test_dataset,
                             start_lr=context.params['lr'],
                             optimizer=context.params['optimizer'],
                             optimizer_params={'maxlr':0.99, 'minlr': 0.0001, 'decayrate': 0.01, 'stop_lr': 0.00005},
                             activate_func=context.params['activate_func'],
                             drop_out=context.params['drop_out'],
                             dense_layers=context.params['dense_layers'],
                             model_name=Path(f_name),
                             model_save_path=out_path,
                             write_path=out_path,
                             log_path=out_path,
                             log_file_name=log_file_path,
                             train_outputs=out_path,
                             F_test=False,
                             show_train_metrics=context.params['show_train_metrics'],
                             resolution=context.params['resolution'],
                             max_distance=context.params['max_distance'],
                             spatial_columns=context.params['spatial_column_names'],
                             id_column=context.params['id_column'],
                             batch_size=context.params['batch_size']
                             )
        new_data = {'train_dataset': train_dataset, 'val_dataset': val_dataset, 'test_dataset': test_dataset, 'model': gnnwr}
        # Expose the derived run paths to the downstream trainer/loader stages.
        context.params['log_save_path'] = log_file_path
        context.params['loss_pic_path'] = loss_pic_path
        context.params['out_path'] = out_path
        context.params['f_name'] = f_name
        return ProcessingContext(new_data, **context.params)

class Net_trainer(ProcessingStrategy):
    """Training stage: run the model, report the requested best-checkpoint
    metrics, and save the prediction CSV into the run's output folder."""

    @timer
    def process(self, context):
        model = context.data['model']
        epochs = context.params['max_epochs']
        # Print every epoch for short runs, otherwise every 10 epochs.
        frequency = 1 if epochs < 10 else 10
        model.run(epochs, print_frequency=frequency)

        # Optionally report metrics for each "best" checkpoint the run kept.
        checkpoint_flags = (
            ('show_bestAIC_result', '.bestAIC.pkl'),
            ('show_bestAUC_result', '.bestAUC.pkl'),
            ('show_bestR2_result', '.bestR2.pkl'),
        )
        for flag, suffix in checkpoint_flags:
            if context.params[flag]:
                model.result(postfix=suffix)

        # Persist the regression predictions for this run.
        f_name = context.params['f_name']
        prediction_csv = context.params['out_path'] / f'predRe{f_name}.csv'
        model.reg_result(filename=prediction_csv, postfix=context.params['model_postfix'])

        return ProcessingContext(context.data, **context.params)

class Net_loader(ProcessingStrategy):
    """Evaluation stage: load a previously trained model, report its accuracy,
    and save the prediction CSV into the run's output folder."""

    @timer
    def process(self, context):
        model = context.data['model']

        # Restore weights and report accuracy for the chosen checkpoint.
        model.load_model(context.params['load_model_path'])
        model.result(postfix=context.params['model_postfix'])

        # Persist the regression predictions for this run.
        f_name = context.params['f_name']
        prediction_csv = context.params['out_path'] / f'predRe{f_name}.csv'
        model.reg_result(filename=prediction_csv, postfix=context.params['model_postfix'])

        return ProcessingContext(context.data, **context.params)

def write_src_to_txt(out_path: Path, f_name: str, src_name='gwlsa_settings.py'):
    """Snapshot the settings source file into *out_path* as '<f_name>_src.txt'.

    Args:
        out_path: destination directory (created if missing).
        f_name: base name for the snapshot file.
        src_name: settings file name, resolved two levels above this module
                  (TODO confirm this layout holds after refactors).

    Raises:
        FileNotFoundError: if the settings file does not exist.
    """
    # Locate the settings script relative to this module.
    script_path = Path(__file__).parent.parent / src_name
    script_content = script_path.read_text(encoding='utf-8')

    # exist_ok avoids the exists()/mkdir race of the original and is a no-op
    # when the directory is already there.
    out_path.mkdir(parents=True, exist_ok=True)

    write_src_path = out_path / f'{f_name}_src.txt'
    # Bug fix: the original wrote with the platform default encoding, which
    # raises UnicodeEncodeError for non-ASCII source (e.g. Chinese comments)
    # on cp1252 systems. Write with explicit UTF-8, matching the read side.
    write_src_path.write_text(script_content, encoding='utf-8')
    print(f"脚本内容已成功写入到 {write_src_path}")

def split_csv_train_val_test(raw_data_path:str,
                             x_column_names:list,
                             y_column_name:str,
                             spatial_column_names:list,
                             test_ratio:float,
                             valid_ratio:float,
                             batch_size:int,
                             max_val_size=1024,
                             max_test_size=1024,
                             csv_encoding='gb2312',
                             subfolder_name='',
                             seed=23
                             ):
    """Read a raw CSV, split it into train/val/test datasets, and save each
    split (without distance matrices) under 'data_split_csv/<subfolder_name>'."""
    frame = pd.read_csv(raw_data_path, encoding=csv_encoding)
    splits = datasets.init_dataset(
        data=frame,
        test_ratio=test_ratio,
        valid_ratio=valid_ratio,
        x_column=x_column_names,
        y_column=[y_column_name],
        spatial_column=spatial_column_names,
        batch_size=batch_size,
        max_val_size=max_val_size,
        max_test_size=max_test_size,
        sample_seed=seed,
    )
    # init_dataset returns (train, val, test) in this order.
    for dataset, tag in zip(splits, ('train', 'val', 'test')):
        dataset.save(f'data_split_csv/{subfolder_name}', tag, save_distance=False)

if __name__ == '__main__':
    # Module is meant to be imported by the pipeline driver; no standalone entry point.
    pass
