# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 00:19:45 2018

@author: Rituraj
"""

#Software Defect Prediction 
import os
import numpy as np
import pandas as pd
import arff
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score
from imblearn.over_sampling import SMOTE

#add dataset normalization and feature selection function

def my_sdp_preprocessor(datafilename):
    """Load a defect dataset (.csv or .arff), clean it, oversample with
    SMOTE, and split it into training / validation / test sets.

    Parameters
    ----------
    datafilename : str
        Path to a ``.csv`` or ``.arff`` dataset that contains a
        ``'Defective'`` column with ``'Y'``/``'N'`` labels.

    Returns
    -------
    tuple
        ``(original_data, original_X, original_Y, combined_training_data,
        x_train1, x_train2, x_train, x_test, x_val,
        y_train1, y_train2, y_train, y_test, y_val)``

    Raises
    ------
    ValueError
        If the file extension is neither ``.csv`` nor ``.arff``.
    """
    file_extension = get_file_extension(datafilename)
    if file_extension == '.csv':
        original_data = pd.read_csv(datafilename)
    elif file_extension == '.arff':
        with open(datafilename, 'r') as file:
            dataset = arff.load(file)
        original_data = pd.DataFrame(
            dataset['data'],
            columns=[attr[0] for attr in dataset['attributes']])
    else:
        # BUG FIX: the original silently continued with an empty list for
        # unknown extensions and crashed later with a confusing
        # AttributeError; fail fast with a clear message instead.
        raise ValueError(
            "Unsupported file extension %r; expected '.csv' or '.arff'"
            % file_extension)

    # Fill any missing values with False. (The original also computed
    # isnull().values.any() but discarded the result — dead code, removed.)
    original_data = original_data.fillna(value=False)

    # Separate features from the 'Defective' label, mapping Y/N -> 1/0.
    original_X = pd.DataFrame(original_data.drop(['Defective'], axis=1))
    original_Y = pd.DataFrame(original_data['Defective'].map({'Y': 1, 'N': 0}))

    # Hold out 10% as the final test set BEFORE resampling, so the test
    # set never contains synthetic SMOTE samples.
    x_train1, x_test, y_train1, y_test = train_test_split(
        original_X, original_Y, test_size=.1, random_state=12)

    # Oversample the minority class to a 1:1 ratio; the resampled pool
    # (x_train2) contains the rows of x_train1 plus synthetic samples.
    sm = SMOTE(random_state=12, sampling_strategy=1.0)
    x, y = sm.fit_resample(x_train1, y_train1)
    x_train2 = pd.DataFrame(x, columns=original_X.columns)
    y_train2 = pd.DataFrame(y, columns=['Defective'])

    # Carve a 10% validation set out of the resampled training pool.
    x_train, x_val, y_train, y_val = train_test_split(
        x_train2, y_train2, test_size=.1, random_state=12)

    combined_training_data = x_train.copy()
    combined_training_data['Defective'] = y_train

    # Visualise pairwise feature correlations of the final training data.
    # Imported locally so the heavy plotting dependency loads only on use.
    import seaborn as sns
    corr = combined_training_data.corr()
    sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns)

    return (original_data, original_X, original_Y, combined_training_data,
            x_train1, x_train2, x_train, x_test, x_val,
            y_train1, y_train2, y_train, y_test, y_val)


def get_file_extension(filename):
    """Return the extension of *filename* (including the leading dot),
    or an empty string when there is none."""
    return os.path.splitext(filename)[1]