import os

import pandas as pd
from utils.getfile_abs_path import getFilePath
from utils.web_scrapers.ssc_scraper import scrape_ssc_data
import yaml


def load_raw_data(file_path):
    """Read the raw dataset from a CSV file and return it as a DataFrame."""
    raw_df = pd.read_csv(file_path)
    return raw_df


def clean_data(df):
    """Clean a DataFrame: drop rows with missing values, then drop duplicates."""
    return df.dropna().drop_duplicates()


def save_processed_data(df, file_path):
    """Write the processed DataFrame to ``file_path`` as CSV, without the index column."""
    df.to_csv(path_or_buf=file_path, index=False)


# Prepare the data for a random forest regression model
# (loads the raw CSV and splits it into features X and target Y).
def data_processing_random_forest(filepath):
    data = load_raw_data(filepath)
    # NOTE(review): `preprocess_data` is not defined or imported anywhere in
    # this file — calling this function will raise NameError. Confirm where
    # the feature/target split implementation is supposed to come from.
    X, Y = preprocess_data(data)
    return X, Y


# if __name__ == "__main__":
#     # 加载配置文件
#     with open('config/ssq_config.yaml', 'r') as f:
#         config = yaml.safe_load(f)
#
#     # 爬取数据或从Excel转换数据
#     if config.get('scraper'):
#         scrape_ssq_data(config['scraper']['url'], config['data']['raw_data_path'])
#     else:
#         convert_ssq_excel_to_csv(config['excel']['file_path'], config['excel']['sheet_name'], config['data']['processed_data_path'])
#
#     # 加载和处理数据
#     raw_data_path = config['data']['raw_data_path']
#     processed_data_path = config['data']['processed_data_path']
#
#     df = load_raw_data(raw_data_path)
#     df = clean_data(df)
#     save_processed_data(df, processed_data_path)
#
def convert_ssc_excel_to_csv(param, param1, param2):
    """Convert one sheet of an SSC Excel workbook into a CSV file.

    The original function was an empty stub; this implements the conversion
    the (commented-out) pipeline expects. Parameter names are kept unchanged
    for backward compatibility:
        param  -- path to the source Excel file
        param1 -- name of the worksheet to read
        param2 -- destination path for the CSV output
    """
    # Mirror the ssq workflow shown in the commented-out driver above:
    # read the requested sheet and dump it as CSV without the index column.
    df = pd.read_excel(param, sheet_name=param1)
    df.to_csv(param2, index=False)


def process_ssc_data(configpath):
    """Run the SSC data pipeline: load config, read the raw CSV, clean it, save it.

    configpath -- project-relative path to a YAML config file whose ``data``
                  section contains ``raw_data_path`` and ``processed_data_path``.
    """
    realpath = getFilePath(configpath)
    with open(realpath, 'r') as f:
        config = yaml.safe_load(f)

    # Data acquisition (web scraping or Excel import) is currently disabled;
    # the pipeline assumes the raw CSV already exists on disk.
    # if config.get('scraper'):
    #     scrape_ssc_data(config['scraper']['url'], config['data']['raw_data_path'])
    # else:
    #     convert_ssc_excel_to_csv(config['excel']['file_path'], config['excel']['sheet_name'],
    #                              config['data']['processed_data_path'])

    raw_data_path = config['data']['raw_data_path']
    processed_data_path = config['data']['processed_data_path']

    # Resolve each project-relative path exactly once. The original code
    # re-applied getFilePath to the already-resolved absolute paths below,
    # which is redundant (and wrong if getFilePath is not idempotent).
    full_raw_data_path = getFilePath(raw_data_path)
    full_processed_data_path = getFilePath(processed_data_path)

    # Ensure the output directory exists; exist_ok avoids the race between
    # an exists() check and makedirs().
    os.makedirs(os.path.dirname(full_processed_data_path), exist_ok=True)

    df = load_raw_data(full_raw_data_path)
    df = clean_data(df)
    save_processed_data(df, full_processed_data_path)
