import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import arff
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import StandardScaler

# Column names for the NASA PC1 software-defect dataset: 40 static-code
# metrics followed by the 'Defective' class label ('Y'/'N').
column_names = [
    'LOC_BLANK', 'BRANCH_COUNT', 'CALL_PAIRS', 'LOC_CODE_AND_COMMENT',
    'LOC_COMMENTS', 'CONDITION_COUNT', 'CYCLOMATIC_COMPLEXITY',
    'CYCLOMATIC_DENSITY', 'DECISION_COUNT', 'DECISION_DENSITY',
    'DESIGN_COMPLEXITY', 'DESIGN_DENSITY', 'EDGE_COUNT',
    'ESSENTIAL_COMPLEXITY', 'ESSENTIAL_DENSITY', 'LOC_EXECUTABLE',
    'PARAMETER_COUNT', 'GLOBAL_DATA_COMPLEXITY', 'GLOBAL_DATA_DENSITY',
    'HALSTEAD_CONTENT', 'HALSTEAD_DIFFICULTY', 'HALSTEAD_EFFORT',
    'HALSTEAD_ERROR_EST', 'HALSTEAD_LENGTH', 'HALSTEAD_LEVEL',
    'HALSTEAD_PROG_TIME', 'HALSTEAD_VOLUME', 'MAINTENANCE_SEVERITY',
    'MODIFIED_CONDITION_COUNT', 'MULTIPLE_CONDITION_COUNT', 'NODE_COUNT',
    'NORMALIZED_CYLOMATIC_COMPLEXITY', 'NUM_OPERANDS', 'NUM_OPERATORS',
    'NUM_UNIQUE_OPERANDS', 'NUM_UNIQUE_OPERATORS', 'NUMBER_OF_LINES',
    'PATHOLOGICAL_COMPLEXITY', 'PERCENT_COMMENTS', 'LOC_TOTAL', 'Defective'
]


# Load the dataset
def load_data(filepath, columns=None):
    """Load a defect dataset and return a ``(features, labels)`` pair.

    The file is parsed as CSV; lines starting with ``'@'`` (the ARFF
    header: ``@relation`` / ``@attribute`` / ``@data``) are skipped via
    ``comment='@'``, so both plain CSV and ARFF-style files load cleanly.
    The original code relied on the header rows being coerced to NaN and
    dropped, which silently depended on ``dropna`` below.

    Parameters:
        filepath: path to the data file; '?' cells are treated as missing.
        columns: optional list of column names (last one is the class
            label); defaults to the module-level ``column_names``.

    Returns:
        features: 2-D numeric ndarray of all columns except the last.
        labels: 1-D int ndarray with 'Y' mapped to 1 and 'N' to 0.

    Raises:
        ValueError: if any feature column is still non-numeric after
            coercion.
    """
    if columns is None:
        columns = column_names
    data = pd.read_csv(filepath, names=columns, na_values=['?'], comment='@')

    # Map the class labels 'Y'/'N' to 1/0; anything else becomes NaN and
    # is removed by dropna below.
    data['Defective'] = data['Defective'].map({'Y': 1, 'N': 0})

    # Coerce every feature column to numeric; unparseable cells become NaN.
    # errors='coerce' never raises, so no try/except is needed here.
    for col in data.columns[:-1]:
        data[col] = pd.to_numeric(data[col], errors='coerce')

    # Defensive check: after coercion every feature column must be numeric.
    non_numeric_features = [col for col in data.columns[:-1]
                            if not np.issubdtype(data[col].dtype, np.number)]
    if non_numeric_features:
        raise ValueError(f"Non-numeric columns found after conversion: {non_numeric_features}")

    # Handle missing values by simply dropping incomplete rows.
    data = data.dropna()
    features = data.iloc[:, :-1].values
    # Cast to int so the labels keep a consistent dtype even when the
    # column briefly held NaN (which forces float) before dropna.
    labels = data.iloc[:, -1].values.astype(int)

    # Print the first few rows of features and labels to confirm the format.
    print("Features:")
    print(features[:5])
    print("Labels:")
    print(labels[:5])

    return features, labels


# Data preprocessing
def preprocess_data(features):
    """Standardize the feature matrix to zero mean and unit variance.

    Parameters:
        features: 2-D numeric ndarray of raw feature values.

    Returns:
        The scaled feature matrix.

    Raises:
        ValueError: if the array's dtype is not numeric.
    """
    # Guard: scaling is only meaningful on numeric data.
    if not np.issubdtype(features.dtype, np.number):
        raise ValueError("Features contain non-numeric values.")

    # Fit the scaler and transform in a single step.
    return StandardScaler().fit_transform(features)


# Oversampling
def oversample_data(features, labels):
    """Rebalance the classes by synthesizing minority-class samples.

    Uses SMOTE with a fixed random seed so the generated samples are
    reproducible across runs.

    Returns:
        The resampled ``(features, labels)`` pair.
    """
    sampler = SMOTE(random_state=42)
    return sampler.fit_resample(features, labels)

def visualize_data(data):
    """Visualize metric relationships in *data*, colored by defectiveness.

    Shows two (blocking) figures: a single scatter plot of cyclomatic
    complexity vs Halstead effort, and a pair-plot grid of five metrics.

    Parameters:
        data: DataFrame containing the metric columns plotted below and a
            numeric 'Defective' column used for the color map.
    """
    # Create a large canvas for the single scatter plot.
    plt.figure(figsize=(15, 10))

    plt.scatter(data['CYCLOMATIC_COMPLEXITY'], data['HALSTEAD_EFFORT'], c=data['Defective'], cmap='viridis')
    plt.colorbar(label='Defective (Y/N)')
    plt.xlabel('Cyclomatic Complexity')
    plt.ylabel('Halstead Effort')
    plt.title('Cyclomatic Complexity vs Halstead Effort by Defectiveness')
    plt.show()

    # Pair-plot grid for a handful of attributes.
    attributes_to_plot = ['CYCLOMATIC_COMPLEXITY', 'HALSTEAD_EFFORT', 'LOC_BLANK', 'BRANCH_COUNT', 'CONDITION_COUNT']
    n = len(attributes_to_plot)

    fig, axes = plt.subplots(nrows=n, ncols=n, figsize=(20, 20))

    for i, row_attr in enumerate(attributes_to_plot):
        for j, col_attr in enumerate(attributes_to_plot):
            ax = axes[i, j]
            # x comes from the COLUMN attribute and y from the ROW
            # attribute, so the edge labels below describe the plotted
            # data. (The original code plotted x from the row attribute
            # but labeled the bottom edge with the column attribute and
            # the left edge with the row attribute — labels and data
            # disagreed; it also set each label twice.)
            ax.scatter(data[col_attr], data[row_attr], c=data['Defective'], cmap='viridis', alpha=0.6)
            ax.set_title(f'{row_attr} vs {col_attr}')
            # Label only the outer edges and hide interior tick labels to
            # reduce clutter.
            if i == n - 1:
                ax.set_xlabel(col_attr)
            else:
                ax.set_xticklabels([])
            if j == 0:
                ax.set_ylabel(row_attr)
            else:
                ax.set_yticklabels([])

    plt.tight_layout()
    plt.show()
def main():
    """Entry point: load, preprocess, rebalance, and visualize PC1 data."""
    filepath = r'C:\Users\xR\Desktop\python\software-defect-prediction\dataset\PC1.arff'  # Replace with your dataset path
    features, labels = load_data(filepath)

    # Build a DataFrame of the raw (unscaled) data for visualization.
    # Reuse the module-level `column_names` constant instead of the
    # duplicated 40-line copy the original declared here, which shadowed
    # it and had to be kept in sync manually.
    data = pd.DataFrame(np.hstack((features, labels.reshape(-1, 1))), columns=column_names)

    # Standardize the features.
    features = preprocess_data(features)

    # Oversample to address the class imbalance.
    # NOTE(review): the scaled, resampled arrays are not used further in
    # this script — presumably they feed a model-training step elsewhere;
    # confirm before removing.
    features, labels = oversample_data(features, labels)

    # Visualize the raw data.
    visualize_data(data)


if __name__ == '__main__':
    main()