#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/5/16 23:19
# @Author  : Grayson Liu
# @Email   : graysonliu@foxmail.com
# @File    : ods.py

import os
import requests
import pandas as pd
import rule.jvm2008 as rule_jvm2008  # column transformation rules for the jvm2008 data source
import rule.cpu2017 as rule_cpu2017  # column transformation rules for the cpu2017 data source
import rule.cpu2006 as rule_cpu2006  # column transformation rules for the cpu2006 data source
import rule.jbb2015 as rule_jbb2015  # column transformation rules for the jbb2015 data source
import rule.power_ssj2008 as rule_power_ssj2008  # column transformation rules for the power_ssj2008 data source


class OdsUtil:
    """
    Utility class for the ODS (operational data store) layer.

    Downloads raw SPEC benchmark result dumps (cpu2017, cpu2006, jbb2015,
    jvm2008, power_ssj2008), locates the newest locally downloaded copy of
    each data source, and transforms every source into a DataFrame that
    follows the unified column layout in ``ods_column_order``.
    """

    # Names of the supported data sources.
    resource_list = ["cpu2017", "cpu2006", "jbb2015", "jvm2008", "power_ssj2008"]

    # Unified column order shared by every ODS-layer DataFrame; selecting it
    # also discards the intermediate columns of each source.
    ods_column_order = ["Company", "System", "Result", "# cores", "Processor", "CPU Speed(MHz)",
                        "1st Cache per core(KB)", "2nd Cache per core(KB)", "3rd Cache per chip(MB)",
                        "Other Cache per chip(MB)", "Memory(GB)", "Updated", "Report Link"]

    # jvm2008: columns of interest and the renaming applied to them.
    # NOTE: several raw headers end with a blank or a tab ("Updated ",
    # "Hardware Vendor\t") - the keys must match the CSV headers byte for byte.
    ods_jvm2008_col = ["Company", "System", "Result", "# cores", "# cores per chip", "Processor", "CPU Speed",
                       "1st Cache", "2nd Cache", "Other Cache", "Memory", "Updated ", "Disclosure"]
    ods_jvm2008_mapping = {"Updated ": "Updated"}

    # cpu2017: columns of interest and the renaming applied to them.
    ods_cpu2017_col = ["Benchmark", "Hardware Vendor\t", "System", "Base Result", "# Cores", "# Chips ",
                       "Processor ", "Processor MHz", "1st Level Cache", "2nd Level Cache", "3rd Level Cache",
                       "Other Cache", "Memory", "Updated ", "Disclosure"]
    ods_cpu2017_mapping = {"Hardware Vendor\t": "Company",
                           "Base Result": "Result",
                           "# Cores": "# cores",
                           "# Chips ": "# chips",
                           "Processor ": "Processor",
                           "Processor MHz": "CPU Speed(MHz)",
                           "1st Level Cache": "1st Cache per core(KB)",
                           "2nd Level Cache": "2nd Cache per core(KB)",
                           "3rd Level Cache": "3rd Cache per chip(MB)",
                           "Other Cache": "Other Cache per chip(MB)",
                           "Memory": "Memory(GB)",
                           "Updated ": "Updated",
                           "Disclosure": "Report Link"}

    # cpu2006: columns of interest and the renaming applied to them.
    ods_cpu2006_col = ["Benchmark", "Hardware Vendor\t", "System", "Result", "# Cores", "# Chips ", "Processor ",
                       "Processor MHz",
                       "1st Level Cache", "2nd Level Cache", "3rd Level Cache", "Other Cache", "Memory", "Updated ",
                       "Disclosure"]
    ods_cpu2006_mapping = {"Hardware Vendor\t": "Company",
                           "Base Result": "Result",
                           "# Cores": "# cores",
                           "# Chips ": "# chips",
                           "Processor ": "Processor",
                           "Processor MHz": "CPU Speed(MHz)",
                           "1st Level Cache": "1st Cache per core(KB)",
                           "2nd Level Cache": "2nd Cache per core(KB)",
                           "3rd Level Cache": "3rd Cache per chip(MB)",
                           "Other Cache": "Other Cache per chip(MB)",
                           "Memory": "Memory(GB)",
                           "Updated ": "Updated",
                           "Disclosure": "Report Link"}

    # jbb2015: columns of interest and the renaming applied to them (headers
    # are stripped of trailing whitespace before the mapping is applied).
    ods_jbb2015_col = ["Benchmark", "Company", "System", "Critical-jOPS\t", "# cores", "# cores per chip",
                       "Processor", "CPU Speed", "Primary Cache", "Secondary Cache", "Tertiary Cache", "Memory (GB)",
                       "Updated ", "Disclosure"]
    ods_jbb2015_mapping = {"Updated ": "Updated",
                           "Critical-jOPS": "Result",
                           "CPU Speed": "CPU Speed(MHz)",
                           "Memory (GB)": "Memory(GB)"}

    # power_ssj2008: columns of interest and the renaming applied to them.
    ods_power_ssj2008_col = ["Hardware Vendor\t", "System", "Result", "# Cores", "# Cores Per Chip ", "Processor ",
                             "Processor MHz", "1st Level Cache", "2nd Level Cache", "3rd Level Cache", "Memory (GB)",
                             "Updated ", "Disclosure"]
    ods_power_ssj2008_mapping = {"Processor MHz": "CPU Speed(MHz)", "# Cores": "# cores"}

    def __init__(self, raw_folder_path: str = "../data/raw", ods_folder_path: str = "../data/ods"):
        """
        :param raw_folder_path: folder that holds the raw (downloaded) source files
        :param ods_folder_path: folder that receives the ODS-layer CSV files
        """
        self.raw_folder_path = raw_folder_path
        self.ods_folder_path = ods_folder_path

    def download_latest_resource(self, resource_name: str):
        """
        Download the latest CSV dump of a data source from spec.org.

        :param resource_name: one of OdsUtil.resource_list
        :return: path of the downloaded file, or None if the download failed
        """
        assert resource_name in OdsUtil.resource_list
        # Build the dump URL from the data source name.
        download_addr = f"https://www.spec.org/cgi-bin/osgresults?conf={resource_name};op=dump;format=csvdump"
        try:
            # timeout so a stalled server cannot hang the caller forever
            response = requests.get(download_addr, stream=True, timeout=60)
            response.raise_for_status()
            # The Content-Disposition header carries a quoted file name that
            # embeds the resource name and the download date/time; the [1:-1]
            # slice removes the surrounding quotes.
            file_name = response.headers["Content-Disposition"].split(";")[1].split("=")[1][1:-1]
            file_path = os.path.join(self.raw_folder_path, file_name)
            # Stream the payload to disk in 1 MiB chunks.
            with open(file_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=1048576):
                    if chunk:
                        f.write(chunk)
            print(f"successfully downloaded resource {resource_name}, file path: {file_path}")
            return file_path
        except Exception as e:
            # Best-effort boundary: report the failure (the previous version
            # swallowed the exception without logging it) and return None.
            print(f"failed to download results: {e}")
            return None

    def get_latest_resource(self, resource_name: str, delete_old: bool = False) -> str:
        """
        Return the path of the newest locally downloaded file of a data source.

        File names end with "...-<date>-<time>.csv"; the concatenated digits
        form a sortable download timestamp.

        :param resource_name: one of OdsUtil.resource_list
        :param delete_old: also remove every older file of the same source
        :return: path of the newest matching file
        :raises FileNotFoundError: if no local file of the source exists
                (the previous version crashed with an obscure IndexError)
        """
        assert resource_name in OdsUtil.resource_list
        # All files in the raw folder that belong to this data source.
        candidates = [f for f in os.listdir(self.raw_folder_path) if resource_name in f]
        if not candidates:
            raise FileNotFoundError(
                f"no local file found for resource {resource_name!r} in {self.raw_folder_path!r}")

        def download_time(file_name: str) -> int:
            # "xxx-20220516-2319.csv" -> 202205162319
            parts = file_name.split("-")
            return int(parts[-2] + parts[-1].split(".")[0])

        latest_file = max(candidates, key=download_time)
        if delete_old:
            for file_name in candidates:
                if file_name != latest_file:
                    delete_file_path = os.path.join(self.raw_folder_path, file_name)
                    print(f"Delete file: {delete_file_path}")
                    os.remove(delete_file_path)
        return os.path.join(self.raw_folder_path, latest_file)

    def __load_latest(self, resource_name: str, usecols, benchmark: str = None) -> pd.DataFrame:
        """
        Read the newest local CSV dump of a data source.

        :param resource_name: one of OdsUtil.resource_list
        :param usecols: columns to load from the CSV
        :param benchmark: if given, keep only rows whose "Benchmark" column
                          equals this value (for dumps bundling several suites)
        :return: the loaded (and possibly filtered) DataFrame
        """
        data = pd.read_csv(self.get_latest_resource(resource_name=resource_name), usecols=usecols)
        if benchmark is not None:
            data = data.query(f"Benchmark == {benchmark!r}")
        return data

    def __save(self, data: pd.DataFrame, table_name: str):
        """Write an ODS-layer DataFrame to <ods_folder_path>/<table_name>.csv."""
        data.to_csv(os.path.join(self.ods_folder_path, f"{table_name}.csv"))

    def get_ods_jvm2008(self, save_to_csv: bool = False) -> pd.DataFrame:
        """
        Build the ODS-layer DataFrame for the jvm2008 data source.

        :param save_to_csv: also write the result to the ODS folder as CSV
        :return: DataFrame following ``ods_column_order``
        """
        data = self.__load_latest("jvm2008", OdsUtil.ods_jvm2008_col)
        data = data.rename(columns=OdsUtil.ods_jvm2008_mapping)

        # Apply the per-column cleaning rules of the jvm2008 source.
        data["Company"] = data["Company"].map(rule_jvm2008.company_rule)
        data["Processor"] = data["Processor"].map(rule_jvm2008.processor_rule)
        data["CPU Speed(MHz)"] = data["CPU Speed"].map(rule_jvm2008.cpu_speed_rule)
        data["1st Cache per core(KB)"] = data["1st Cache"].map(rule_jvm2008.first_cache_rule)
        # This rule reads several columns at once, hence the row-wise apply.
        data["2nd Cache per core(KB)"] = data.apply(rule_jvm2008.second_cache_rule, axis=1)
        data["3rd Cache per chip(MB)"] = data["Other Cache"].map(rule_jvm2008.third_cache_rule)
        data["Other Cache per chip(MB)"] = data["Other Cache"].map(rule_jvm2008.other_cache_rule)
        data["Memory(GB)"] = data["Memory"].map(rule_jvm2008.memory_rule)
        data["Report Link"] = data["Disclosure"].map(rule_jvm2008.report_link_rule)

        # Selecting the unified order drops the intermediate columns
        # ("# cores per chip", raw cache/memory columns, "Disclosure").
        data = data[OdsUtil.ods_column_order]
        if save_to_csv:
            self.__save(data, "ods_jvm2008")
        return data

    def __process_cpu2017_dataframe(self, src_df: pd.DataFrame) -> pd.DataFrame:
        """
        Shared transformation for the four cpu2017 benchmark suites.

        :param src_df: raw DataFrame, already filtered to a single suite
        :return: DataFrame following ``ods_column_order``
        """
        src_df = src_df.rename(columns=OdsUtil.ods_cpu2017_mapping)

        src_df["Company"] = src_df["Company"].map(rule_cpu2017.company_rule)
        src_df["1st Cache per core(KB)"] = src_df["1st Cache per core(KB)"].map(rule_cpu2017.first_cache_rule)
        # These rules read several columns at once, hence the row-wise apply.
        src_df["2nd Cache per core(KB)"] = src_df.apply(rule_cpu2017.second_cache_rule, axis=1)
        src_df["3rd Cache per chip(MB)"] = src_df.apply(rule_cpu2017.third_cache_rule, axis=1)
        src_df["Other Cache per chip(MB)"] = src_df["Other Cache per chip(MB)"].map(rule_cpu2017.other_cache_rule)
        src_df["Memory(GB)"] = src_df["Memory(GB)"].map(rule_cpu2017.memory_rule)
        src_df["Report Link"] = src_df["Report Link"].map(rule_cpu2017.report_link_rule)

        return src_df[OdsUtil.ods_column_order].reset_index(drop=True)

    def get_ods_cpu2017_intspeed(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CINT2017 (integer, speed) suite of cpu2017."""
        data = self.__process_cpu2017_dataframe(
            self.__load_latest("cpu2017", OdsUtil.ods_cpu2017_col, benchmark="CINT2017"))
        if save_to_csv:
            self.__save(data, "ods_cpu2017_intspeed")
        return data

    def get_ods_cpu2017_intrate(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CINT2017rate (integer, throughput) suite of cpu2017."""
        data = self.__process_cpu2017_dataframe(
            self.__load_latest("cpu2017", OdsUtil.ods_cpu2017_col, benchmark="CINT2017rate"))
        if save_to_csv:
            self.__save(data, "ods_cpu2017_intrate")
        return data

    def get_ods_cpu2017_fpspeed(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CFP2017 (floating-point, speed) suite of cpu2017."""
        data = self.__process_cpu2017_dataframe(
            self.__load_latest("cpu2017", OdsUtil.ods_cpu2017_col, benchmark="CFP2017"))
        if save_to_csv:
            self.__save(data, "ods_cpu2017_fpspeed")
        return data

    def get_ods_cpu2017_fprate(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CFP2017rate (floating-point, throughput) suite of cpu2017."""
        data = self.__process_cpu2017_dataframe(
            self.__load_latest("cpu2017", OdsUtil.ods_cpu2017_col, benchmark="CFP2017rate"))
        if save_to_csv:
            self.__save(data, "ods_cpu2017_fprate")
        return data

    def __process_cpu2006_dataframe(self, src_df: pd.DataFrame) -> pd.DataFrame:
        """
        Shared transformation for the four cpu2006 benchmark suites.

        :param src_df: raw DataFrame, already filtered to a single suite
        :return: DataFrame following ``ods_column_order``
        """
        src_df = src_df.rename(columns=OdsUtil.ods_cpu2006_mapping)

        src_df["Company"] = src_df["Company"].map(rule_cpu2006.company_rule)
        # All cpu2006 cache rules read several columns at once (row-wise apply).
        src_df["1st Cache per core(KB)"] = src_df.apply(rule_cpu2006.first_cache_rule, axis=1)
        src_df["2nd Cache per core(KB)"] = src_df.apply(rule_cpu2006.second_cache_rule, axis=1)
        src_df["3rd Cache per chip(MB)"] = src_df.apply(rule_cpu2006.third_cache_rule, axis=1)
        src_df["Other Cache per chip(MB)"] = src_df.apply(rule_cpu2006.other_cache_rule, axis=1)
        src_df["Memory(GB)"] = src_df["Memory(GB)"].map(rule_cpu2006.memory_rule)
        src_df["Report Link"] = src_df["Report Link"].map(rule_cpu2006.report_link_rule)

        return src_df[OdsUtil.ods_column_order].reset_index(drop=True)

    def get_ods_cpu2006_intspeed(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CINT2006 (integer, speed) suite of cpu2006."""
        data = self.__process_cpu2006_dataframe(
            self.__load_latest("cpu2006", OdsUtil.ods_cpu2006_col, benchmark="CINT2006"))
        if save_to_csv:
            self.__save(data, "ods_cpu2006_intspeed")
        return data

    def get_ods_cpu2006_intrate(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CINT2006rate (integer, throughput) suite of cpu2006."""
        data = self.__process_cpu2006_dataframe(
            self.__load_latest("cpu2006", OdsUtil.ods_cpu2006_col, benchmark="CINT2006rate"))
        if save_to_csv:
            self.__save(data, "ods_cpu2006_intrate")
        return data

    def get_ods_cpu2006_fpspeed(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CFP2006 (floating-point, speed) suite of cpu2006."""
        data = self.__process_cpu2006_dataframe(
            self.__load_latest("cpu2006", OdsUtil.ods_cpu2006_col, benchmark="CFP2006"))
        if save_to_csv:
            self.__save(data, "ods_cpu2006_fpspeed")
        return data

    def get_ods_cpu2006_fprate(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the CFP2006rate (floating-point, throughput) suite of cpu2006."""
        data = self.__process_cpu2006_dataframe(
            self.__load_latest("cpu2006", OdsUtil.ods_cpu2006_col, benchmark="CFP2006rate"))
        if save_to_csv:
            self.__save(data, "ods_cpu2006_fprate")
        return data

    def __process_jbb2015_dataframe(self, src_df: pd.DataFrame) -> pd.DataFrame:
        """
        Shared transformation for the three jbb2015 benchmark modes.

        :param src_df: raw DataFrame, already filtered to a single mode
        :return: DataFrame following ``ods_column_order``
        """
        # Strip trailing whitespace/tabs from the headers, e.g.
        # "Critical-jOPS\t" -> "Critical-jOPS", so the mapping keys match.
        src_df.columns = [col.strip() for col in src_df.columns]
        src_df = src_df.rename(columns=OdsUtil.ods_jbb2015_mapping)

        src_df["Company"] = src_df["Company"].map(rule_jbb2015.company_rule)
        src_df["Processor"] = src_df["Processor"].map(rule_jbb2015.processor_rule)
        src_df["1st Cache per core(KB)"] = src_df["Primary Cache"].map(rule_jbb2015.first_cache_rule)
        # These rules read several columns at once, hence the row-wise apply.
        src_df["2nd Cache per core(KB)"] = src_df.apply(rule_jbb2015.second_cache_rule, axis=1)
        src_df["3rd Cache per chip(MB)"] = src_df.apply(rule_jbb2015.third_cache_rule, axis=1)
        # jbb2015 reports no "other cache"; keep the unified schema with "0".
        src_df["Other Cache per chip(MB)"] = "0"
        src_df["Memory(GB)"] = src_df["Memory(GB)"].map(rule_jbb2015.memory_rule)
        src_df["Report Link"] = src_df["Disclosure"].map(rule_jbb2015.report_link_rule)

        return src_df[OdsUtil.ods_column_order].reset_index(drop=True)

    def get_ods_jbb2015_comp(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the JBB2015COMP (composite) mode of jbb2015."""
        data = self.__process_jbb2015_dataframe(
            self.__load_latest("jbb2015", OdsUtil.ods_jbb2015_col, benchmark="JBB2015COMP"))
        if save_to_csv:
            self.__save(data, "ods_jbb2015_comp")
        return data

    def get_ods_jbb2015_dist(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the JBB2015DIST (distributed) mode of jbb2015."""
        data = self.__process_jbb2015_dataframe(
            self.__load_latest("jbb2015", OdsUtil.ods_jbb2015_col, benchmark="JBB2015DIST"))
        if save_to_csv:
            self.__save(data, "ods_jbb2015_dist")
        return data

    def get_ods_jbb2015_multi(self, save_to_csv: bool = False) -> pd.DataFrame:
        """ODS-layer DataFrame for the JBB2015MULTI (multi-JVM) mode of jbb2015."""
        data = self.__process_jbb2015_dataframe(
            self.__load_latest("jbb2015", OdsUtil.ods_jbb2015_col, benchmark="JBB2015MULTI"))
        if save_to_csv:
            self.__save(data, "ods_jbb2015_multi")
        return data

    def get_ods_power_ssj2008(self, save_to_csv: bool = False) -> pd.DataFrame:
        """
        Build the ODS-layer DataFrame for the power_ssj2008 data source.

        :param save_to_csv: also write the result to the ODS folder as CSV
        :return: DataFrame following ``ods_column_order``
        """
        data = self.__load_latest("power_ssj2008", OdsUtil.ods_power_ssj2008_col)

        # Strip trailing whitespace/tabs from the headers ("Hardware Vendor\t", ...).
        data.columns = [col.strip() for col in data.columns]
        data = data.rename(columns=OdsUtil.ods_power_ssj2008_mapping)

        # Discard records without a measured result. (Bug fix: the original
        # assigned the None returned by an inplace drop() to a spurious
        # "result" column; filtering the rows directly is equivalent and clean.)
        data = data[data["Result"] != 0].copy()

        data["Company"] = data["Hardware Vendor"].map(rule_power_ssj2008.company_rule)
        data["System"] = data["System"].map(rule_power_ssj2008.system_rule)
        data["Processor"] = data["Processor"].map(rule_power_ssj2008.processor_rule)
        # These rules read several columns at once, hence the row-wise apply.
        data["1st Cache per core(KB)"] = data.apply(rule_power_ssj2008.first_cache_rule, axis=1)
        data["2nd Cache per core(KB)"] = data.apply(rule_power_ssj2008.second_cache_rule, axis=1)
        data["3rd Cache per chip(MB)"] = data.apply(rule_power_ssj2008.third_cache_rule, axis=1)
        # power_ssj2008 reports no "other cache"; keep the unified schema with "0".
        data["Other Cache per chip(MB)"] = "0"
        data["Memory(GB)"] = data["Memory (GB)"].map(rule_power_ssj2008.memory_rule)
        data["Report Link"] = data["Disclosure"].map(rule_power_ssj2008.report_link_rule)

        data = data[OdsUtil.ods_column_order]
        if save_to_csv:
            self.__save(data, "ods_power_ssj2008")
        return data
