import os
import pathlib
import sys
root_path = str(pathlib.Path(os.path.abspath(__file__)).parent.parent)
sys.path.append(root_path)

import pandas as pd
from config.base_config import output_path
from model.model_pipeline import train
from model.feature_pipeline import processes
from utils.util import get_transformer_feature_names
import argparse
import os


def read_data(data_path, label_name, task):
    """Load a CSV and split it into features and labels.

    Parameters
    ----------
    data_path : str or file-like
        Path (or buffer) passed straight to ``pd.read_csv``.
    label_name : str or None
        Name of the label column; ignored when ``task == "cluster"``.
    task : str
        Task type; ``"cluster"`` means unsupervised, so no label is extracted.

    Returns
    -------
    tuple
        ``(features_df, labels_array)`` for supervised tasks, or
        ``(full_df, None)`` for clustering.
    """
    frame = pd.read_csv(data_path)
    # Clustering is unsupervised: return the whole frame, no label split.
    if task == "cluster":
        return frame, None
    features = frame.drop(columns=[label_name])
    targets = frame[label_name].values
    return features, targets


if __name__ == '__main__':
    # CLI: -d/--data train CSV path, -l/--label label column, -y/--dtype task type.
    parser = argparse.ArgumentParser(usage="it's usage tip.", description="help info.")
    parser.add_argument("-d", "--data", help="train data", dest="data")
    parser.add_argument("-l", "--label", help="label", dest="label")
    parser.add_argument("-y", "--dtype", help="task type", dest="dtype", default='cls')

    args = parser.parse_args()
    X, y = read_data(args.data, args.label, args.dtype)

    # Use a distinct local name: the original rebound `output_path`, shadowing
    # the `output_path` imported from config.base_config.
    data_dir = os.path.dirname(args.data)
    filename = os.path.basename(args.data)
    new_data_file = os.path.join(data_dir, "new_" + filename)

    X_ = processes.fit_transform(X)
    new_columns = get_transformer_feature_names(processes)
    # pd.SparseDataFrame was removed in pandas 1.0. fit_transform may return a
    # scipy sparse matrix or a dense array; handle both.
    if hasattr(X_, "toarray"):  # scipy sparse matrix
        new_data = pd.DataFrame.sparse.from_spmatrix(X_, columns=new_columns)
    else:
        new_data = pd.DataFrame(X_, columns=new_columns)
    new_data.to_csv(new_data_file, index=False)

    train(X_, y, args.dtype)