from concurrent.futures import ThreadPoolExecutor
import concurrent.futures
import zipfile
import os
import pandas as pd
import numpy as np
from tqdm import tqdm


# Root directory containing the Backblaze quarterly drive-stats zip archives.
data_root = r"E:\迅雷下载\异常检测\Backblaze"
# Alternative archive selections (1 quarter / 2 quarters / 8 quarters);
# only `data_files3` is actually used by main().
data_files = [r"data_Q1_2019.zip", ] # r"data_Q2_2019.zip"
data_files2 = [r"data_Q1_2019.zip", r"data_Q2_2019.zip"]
data_files3 = [r"data_Q1_2019.zip", r"data_Q2_2019.zip", r"data_Q3_2019.zip", r"data_Q4_2019.zip",
              r"data_Q1_2020.zip", r"data_Q2_2020.zip", r"data_Q3_2020.zip", r"data_Q4_2020.zip"]

# Maximum number of worker threads used to read CSV members from each zip.
num_threads = 3


def process_csv_file(zip_ref: zipfile.ZipFile, f: str, ratio: int, model: str):
    """Read one CSV member ``f`` from the open archive and filter it.

    Returns the DataFrame produced by ``filtering`` for that member.
    """
    with zip_ref.open(f) as member:
        frame = pd.read_csv(member)
    # read_csv consumes the member eagerly, so filtering can run outside the with.
    return filtering(frame, ratio, model)

def load_data(file_list: list, ratio: int = 3, model: str = None):
    """Load and filter drive-stats CSVs from the given zip archives.

    Every other CSV member of each archive (``namelist()[::2]`` — subsamples
    the daily files) is read in a small thread pool, filtered through
    ``filtering``, and the results are concatenated into one DataFrame.

    Args:
        file_list: zip file names, resolved relative to ``data_root``.
        ratio: normal-to-failure sampling ratio forwarded to ``filtering``.
        model: optional drive model to restrict to (``None`` keeps all).

    Returns:
        A single DataFrame with all filtered rows; empty if nothing was read.
    """
    frames = []  # filtered per-day DataFrames across all archives
    count = 0    # running row count shown in the progress bar
    for filename in file_list:
        filename = os.path.join(data_root, filename)

        with zipfile.ZipFile(filename, "r") as zip_ref:
            # Take every other archive member to subsample the daily files.
            task = zip_ref.namelist()[::2]

            # Fan the per-day CSV reads out over a small thread pool.
            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                future_results = [executor.submit(process_csv_file, zip_ref, data, ratio, model)
                                  for data in task if data.endswith(".csv")]

                # Collect results as they complete, reporting progress.
                with tqdm(concurrent.futures.as_completed(future_results), total=len(future_results),
                          desc="start to progress >> %s" % filename) as tbar:
                    for future in tbar:
                        try:
                            result = future.result()
                            frames.append(result)
                            count += len(result)
                            tbar.set_postfix(count=count)
                        except Exception as e:
                            # One bad day-file should not abort the whole load.
                            print(f"计算出现异常: {e}")

    # Concatenate once at the end (avoids quadratic repeated concat);
    # pd.concat raises ValueError on an empty list, so guard the no-data case.
    return pd.concat(frames) if frames else pd.DataFrame()

def filtering(data: pd.DataFrame, ratio: int, model: str):
    """Keep all failure rows plus a sampled subset of normal rows.

    Args:
        data: raw drive-stats frame; must contain ``date``, ``model``,
            ``failure`` and the selected ``smart_*_raw`` columns.
        ratio: number of normal rows to sample per failure row;
            ``np.inf`` keeps every normal row.
        model: optional drive model to restrict to (``None`` keeps all).

    Returns:
        DataFrame restricted to date/model/failure plus the selected SMART
        raw attributes, with missing values filled with 0.
    """
    if model is not None:
        data = data[data["model"] == model]

    failure_data = data[data["failure"] == 1]
    normal_pool = data[data["failure"] == 0]

    # Either keep every normal row, or sample `ratio` normals per failure.
    if ratio == np.inf:
        normal_data = normal_pool
    else:
        # Clamp to the pool size: DataFrame.sample raises ValueError when
        # asked for more rows than exist (easily hit with large ratios).
        count_normal = min(ratio * failure_data.shape[0], normal_pool.shape[0])
        normal_data = normal_pool.sample(count_normal)
    data = pd.concat([failure_data, normal_data])

    # Restrict to identifying columns plus the selected SMART raw attributes.
    features = [5, 9, 187, 188, 193, 194, 197, 198, 241, 242]
    features_specified = ["date", "model", "failure"] + [
        "smart_{0}_raw".format(feature) for feature in features
    ]

    data = data[features_specified]
    data = data.fillna(0)
    return data


def main():
    """Build the filtered dataset for one drive model and dump it to CSV."""
    target_model = "ST12000NM0007"
    dataset = load_data(data_files3, ratio=200, model=target_model)
    dataset.to_csv(f"{target_model}.csv", index=False)


if __name__ == '__main__':
    main()
