import pandas as pd
from utils.excel_converters.ssq_excel_converter import convert_ssq_excel_to_csv
from utils.web_scrapers.ssq_scraper import scrape_ssq_data
import yaml


def load_raw_data(file_path):
    """Load the raw dataset from a CSV file.

    Parameters
    ----------
    file_path : str
        Path to the raw CSV file.

    Returns
    -------
    pandas.DataFrame
        The parsed contents of the CSV file.
    """
    raw_df = pd.read_csv(file_path)
    return raw_df

def clean_data(df):
    """Clean a DataFrame by dropping rows with missing values and
    then removing duplicate rows.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data to clean.

    Returns
    -------
    pandas.DataFrame
        A new DataFrame with NA rows and duplicates removed.
    """
    return df.dropna().drop_duplicates()

def save_processed_data(df, file_path):
    """Persist the processed DataFrame to *file_path* as CSV.

    The index column is intentionally omitted so the file round-trips
    cleanly through :func:`pd.read_csv`.
    """
    df.to_csv(file_path, index=False)
    
# Data preparation for the random-forest regression model.
def data_processing_random_forest(filepath):
    """Load raw data and split it into features and labels for the
    random-forest regression model.

    Parameters
    ----------
    filepath : str
        Path to the raw CSV data file.

    Returns
    -------
    tuple
        ``(X, Y)`` feature matrix and label vector as produced by
        ``preprocess_data``.

    NOTE(review): ``preprocess_data`` is neither defined nor imported in
    this file — confirm where it is supposed to come from; as written,
    calling this function raises ``NameError``.
    """
    # Fixed the original non-standard 5-space indentation (PEP 8: 4 spaces).
    data = load_raw_data(filepath)
    X, Y = preprocess_data(data)
    return X, Y

if __name__ == "__main__":
    # Load the pipeline configuration.
    # NOTE(review): this inline logic duplicates main() defined below,
    # which is never called — consider replacing this block with a single
    # call to main() (the guard would need to move below the definition).
    with open('config/ssq_config.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Acquire data: scrape from the web when a scraper section is
    # configured, otherwise convert a local Excel workbook to CSV.
    if config.get('scraper'):
        scrape_ssq_data(config['scraper']['url'],
                        config['data']['raw_data_path'])
    else:
        # NOTE(review): this branch writes directly to processed_data_path,
        # yet the code below still reads raw_data_path — confirm that
        # raw_data_path exists in the Excel case, or it will fail to load.
        convert_ssq_excel_to_csv(config['excel']['file_path'],
                                 config['excel']['sheet_name'],
                                 config['data']['processed_data_path'])

    # Load, clean, and persist the dataset.
    raw_data_path = config['data']['raw_data_path']
    processed_data_path = config['data']['processed_data_path']

    df = load_raw_data(raw_data_path)
    df = clean_data(df)
    save_processed_data(df, processed_data_path)
    
def main():
    """Run the full SSQ data pipeline: acquire, clean, and persist data.

    Reads ``config/ssq_config.yaml``, then either scrapes fresh data or
    converts a local Excel workbook to CSV, and finally loads, cleans,
    and saves the dataset.

    NOTE(review): this function is currently dead code — the ``__main__``
    guard above re-implements the same steps inline instead of calling
    it. Consider wiring the guard to call main().
    """
    # Fixed the original "def  main():" double-space typo.
    with open('config/ssq_config.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Acquire data: scrape from the web when a scraper section is
    # configured, otherwise convert a local Excel workbook to CSV.
    if config.get('scraper'):
        scrape_ssq_data(config['scraper']['url'],
                        config['data']['raw_data_path'])
    else:
        convert_ssq_excel_to_csv(config['excel']['file_path'],
                                 config['excel']['sheet_name'],
                                 config['data']['processed_data_path'])

    # Load, clean, and persist the dataset.
    raw_data_path = config['data']['raw_data_path']
    processed_data_path = config['data']['processed_data_path']

    df = load_raw_data(raw_data_path)
    df = clean_data(df)
    save_processed_data(df, processed_data_path)