import pandas as pd
from pandas import DataFrame
import random


class DataMiningTools:
    """Utility helpers for common data-mining tasks on pandas DataFrames.

    All methods assume discrete attributes coded as small non-negative
    integers (value k means "the k-th category"), which is what the
    original implementation relied on as well.
    """

    def AttributeNum(self, train):
        """Return a list with the number of distinct values in each column.

        Parameters
        ----------
        train : pandas.DataFrame
            Data set of discrete attributes.

        Returns
        -------
        list[int]
            Element i is the cardinality (number of distinct values) of
            column i.
        """
        # nunique() replaces the original O(rows^2 * values) manual
        # duplicate scan per column with a single vectorized pass.
        # NOTE(review): nunique() ignores NaN; the data is assumed NaN-free,
        # as the original integer-coded workflow already required.
        return [int(train.iloc[:, i].nunique()) for i in range(train.shape[1])]

    def AttributeMax(self, train):
        """Return, per column, an upper bound on the number of values.

        For attributes coded 0..max, max + 1 equals the number of possible
        values, which is why 1 is added below (same as the original code).
        """
        a_num_arr = []
        for i in range(train.shape[1]):
            column = train.iloc[:, i]
            # The original seeded its running maximum with 0, so negative
            # codes clamp to 0; an empty frame likewise yields 0 -> count 1.
            a_max = max(int(column.max()), 0) if len(column) else 0
            a_num_arr.append(a_max + 1)
        return a_num_arr

    def ValueCounter(self, train, attribute):
        """Count how often each value 0..max occurs in one column.

        Parameters
        ----------
        train : pandas.DataFrame
        attribute : int
            Positional index of the column to tally.

        Returns
        -------
        list[int]
            counts[v] is the number of rows whose value equals v.
        """
        counts = [0] * self.AttributeMax(train)[attribute]
        for value in train.iloc[:, attribute]:
            counts[int(value)] += 1
        return counts

    def Sampling(self, train, start, sample_size):
        """Split train into a contiguous validation slice and the rest.

        Rows start..start+sample_size-1 become the validation set; the
        remaining rows (before and after the slice) form the training set.

        Returns
        -------
        list[pandas.DataFrame]
            [validation_data, train_data], each re-indexed 0..n-1.
        """
        stop = start + sample_size
        # reset_index(drop=True) replaces the original reset_index()
        # followed by slicing off the leaked "index" column via
        # iloc[:, 1:col+1] — that trick broke for a MultiIndex and is
        # unnecessary when the old index is simply dropped.
        validation_data = train.iloc[start:stop].reset_index(drop=True)
        train_data = pd.concat(
            [train.iloc[0:start], train.iloc[stop:]]
        ).reset_index(drop=True)
        return [validation_data, train_data]

    def RandSampling(self, num, data):
        """Randomly draw num DISTINCT rows from data as a new DataFrame.

        If num is at least the number of rows, the whole data set is
        returned unchanged (same behaviour as the original).
        """
        total = data.shape[0]
        if num >= total:  # asking for everything (or more): return all rows
            return data.iloc[0:total]
        # BUG FIX: the original kept a `sampled` flag list but never set
        # sampled[r] = 1 after drawing row r, so the uniqueness guard was
        # dead code and duplicate rows could be returned. random.sample
        # guarantees `num` distinct positions.
        rows = random.sample(range(total), num)
        return data.iloc[rows]