# import os.path
# import sys
# ROOT_DIR = os.path.abspath('')
# sys.path.append('D:\PyCharm 2022.1\PycharmProjects\new\Software_Defect_Prediction\pythonProject')
# sys.path.append(ROOT_DIR)
import glob
import os.path

import pandas as pd

from util.dataClean import arff_to_csv

# Read the data and inspect the dataset.
# Visualize the original dataset to observe its characteristics.

# The helpers below were only needed for reading sparse-matrix ARFF files;
# the dataset used here does not require them.
# def parse_row(line, len_row):
#     line = line.replace('{', '').replace('}', '')
#
#     row = np.zeros(len_row)
#     for data in line.split(','):
#         index, value = data.split()
#         row[int(index)] = float(value)
#     return row

# def read_data_arff(filename):
#     # Step 1. Read data by row.
#     with open(filename, 'r') as fp:
#         file_content = fp.readlines()
#     # Step 2. Get the columns.
#     columns = []
#     len_attr = len('@attribute')
#     for line in file_content:
#         if line.startswith('@attribute '):
#             col_name = line[len_attr:].split()[0]
#             columns.append(col_name)
#     # Step 3. Get the rows.
#     rows = []
#     len_row = len(columns)
#     for line in file_content:
#         if line.startswith('{'):
#             rows.append(parse_row(line, len_row))
#     # Step 4. Return the results.
#     df = pd.DataFrame(data=rows, columns=columns)
#     print(df)
#     return df

# Convert every ARFF file under MDP/ into a CSV file under CSV/.
# os.path.splitext is used so that only the file EXTENSION is rewritten:
# the previous str.replace('arff', 'csv') would also corrupt any filename
# that happened to contain the substring 'arff' elsewhere.
for arff_path in glob.glob('../dataset/OriginalData/MDP/*.arff'):
    stem, _ext = os.path.splitext(arff_path)
    # Redirect the output into the sibling CSV/ directory.
    csv_path = stem.replace('MDP', 'CSV') + '.csv'
    arff_to_csv(arff_path, csv_path)

# Merge all converted CSV files into a single DataFrame, merge_data.
# The paths are sorted so the merged row order is deterministic (glob order
# is filesystem-dependent), and the frames are collected in a list and
# concatenated ONCE: calling pd.concat inside the loop re-copies the
# accumulated frame every iteration (quadratic behavior).
csv_frames = [
    pd.read_csv(csv_file)
    for csv_file in sorted(glob.glob('../dataset/OriginalData/CSV/*.csv'))
]
# pd.concat([]) raises ValueError, so fall back to an empty DataFrame when
# no CSV files were found (matching the original behavior).
merge_data = pd.concat(csv_frames, ignore_index=True) if csv_frames else pd.DataFrame()

# Clean the merged data and persist it.
# Drop rows with fewer than 32 non-NaN values, i.e. rows that are mostly
# missing.  NOTE(review): 32 presumably relates to the expected number of
# metric columns in the merged dataset -- confirm against the schema.
merge_data = merge_data.dropna(thresh=32)
# Fill the remaining missing values with 0.  Assignment is used instead of
# inplace=True, which is discouraged in modern pandas.
merge_data = merge_data.fillna(value=0)
# index=False: the (now gappy, post-dropna) index is not written out, so no
# reset_index is needed.
merge_data.to_csv("../dataset/CleanedData/clean_data.csv", index=False)
