# ##Python实现PCA
import numpy as np
import pandas as pd
#
# def pca(X, k):  # k is the components you want
#     # mean of each feature
#     n_samples, n_features = X.shape
#     mean = np.array([np.mean(X[:, i]) for i in range(n_features)])
#     # normalization
#     norm_X = X - mean
#     # scatter matrix
#     scatter_matrix = np.dot(np.transpose(norm_X), norm_X)
#     # Calculate the eigenvectors and eigenvalues
#     eig_val, eig_vec = np.linalg.eig(scatter_matrix)
#     eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(n_features)]
#     # sort eig_vec based on eig_val from highest to lowest
#     eig_pairs.sort(reverse=True)
#     # select the top k eig_vec
#     feature = np.array([ele[1] for ele in eig_pairs[:k]])
#     # get new data
#     data = np.dot(norm_X, np.transpose(feature))
#     return data
#
#
# X = pd.read_csv("wind.csv")
# y=X["ser"].str.split(",",expand=True)
# x1=y[y[0]=="1"]
# x2=y[y[0]=="2"]
# x1=x1.drop([0],axis=1,inplace=False)
# x2=x2.drop([0],axis=1,inplace=False)
#
# X = np.array([[-1, 1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
# x1=x1.values
# x2=x2.values
# x1=x1.astype(np.float)
# x2=x2.astype(np.float)
# print(pca(x1,2))
# print(pca(x2,2))


# def Knn(in_data, train_data, train_labels, k):
#     # 计算欧式距离
#     distance = np.zeros(train_data.shape[0])
#     for i in range(train_data.shape[0]):
#         distance[i] = (in_data[0] - train_data[i][0]) ** 2 +(in_data[1] - train_data[i][1]) ** 2+(in_data[2] - train_data[i][2]) ** 2+(in_data[3] - train_data[i][3]) ** 2
#         #开方处理
#         distance[i] = np.power(distance[i], 0.5)
#     # 返回按距离排序的索引
#     index = np.zeros(train_data.shape[0])
#     index = distance.argsort()
#     # 统计前k个最小距离对应的标签个数
#     setosa = 0
#     versicolor = 0
#     virginica=0
#     for i in range(k):
#         if(train_labels[index[i]] == 'Iris-setosa'):
#             setosa += 1
#         elif(train_labels[index[i]] == "Iris-versicolor"):
#             versicolor += 1
#         else:
#             virginica+=1
#     if ((setosa > versicolor)and(setosa>virginica)):
#         print("setosa")
#     elif((versicolor>setosa)and(versicolor>virginica)):
#         print("versicolor")
#     else:
#         print("virginica")
# X = pd.read_csv("iris.csv")
# y=X["ser"].str.split(",",expand=True)
# y=y.values
# y1=y[:,0:4].astype(np.float)
#
# labels=y[:,4]
#
# Knn([6.2,2.2,4.5,1.5],y1,labels,3)

# # -*- coding: utf-8 -*-
#
# import operator
# from numpy import *
# from math import log
#
# #文件读取
# def file2matrix(filename, attribute_num): #传入参数：文件名，属性个数
#  fr = open(filename)
#  arrayOLines = fr.readlines()
#  numberOfLines = len(arrayOLines) #统计数据集行数（样本个数）
#  dataMat = zeros((numberOfLines, attribute_num))
#  classLabelVector = [] #分类标签
#  index = 0
#  for line in arrayOLines:
#   line = line.strip() #strip() 删除字符串中的'\n'
#   listFromLine = line.split() #将一个字符串分裂成多个字符串组成的列表，不带参数时以空格进行分割，当代参数时，以该参数进行分割
#   dataMat[index, : ] = listFromLine[0:attribute_num] #读取数据对象属性值
#   classLabelVector.append(listFromLine[-1]) #读取分类信息
#   index += 1
#  dataSet = [] #数组转化成列表
#  index = 0
#  for index in range(0, numberOfLines):
#   temp = list(dataMat[index, :])
#   temp.append(classLabelVector[index])
#   dataSet.append(temp)
#  return dataSet
#
# #划分数据集
# def splitDataSet(dataSet, axis, value):
#  retDataSet = []
#  for featvec in dataSet: #每行
#   if featvec[axis] == value: #每行中第axis个元素和value相等 #删除对应的元素，并将此行，加入到rerDataSet
#    reducedFeatVec = featvec[:axis]
#    if(not(isinstance(reducedFeatVec,list))):
#         reducedFeatVec=reducedFeatVec.tolist()
#    reducedFeatVec.extend(featvec[axis+1:])
#    retDataSet.append(reducedFeatVec)
#  return retDataSet
#
# #计算香农熵 #计算数据集的香农熵 == 计算数据集类标签的香农熵
# def calcShannonEnt(dataSet):
#  numEntries = len(dataSet) #数据集样本点个数
#  labelCounts = {} #类标签
#  for featVec in dataSet: #统计数据集类标签的个数，字典形式
#   currentLabel = featVec[-1]
#   if currentLabel not in labelCounts.keys():
#    labelCounts[currentLabel] = 0
#   labelCounts[currentLabel] += 1
#  shannonEnt = 0.0
#  for key in labelCounts:
#   prob = float(labelCounts[key])/numEntries
#   shannonEnt -= prob * log(prob, 2)
#  return shannonEnt
#
# #根据香农熵，选择最优的划分方式 #根据某一属性划分后，类标签香农熵越低，效果越好
# def chooseBestFeatureToSplit(dataSet):
#  baseEntropy = calcShannonEnt(dataSet) #计算数据集的香农熵
#  numFeatures = len(dataSet[0])-1
#  bestInfoGain = 0.0 #最大信息增益
#  bestFeature = 0 #最优特征
#  for i in range(0, numFeatures):
#   featList = [example[i] for example in dataSet] #所有子列表（每行）的第i个元素，组成一个新的列表
#   uniqueVals = set(featList)
#   newEntorpy = 0.0
#   for value in uniqueVals: #数据集根据第i个属性进行划分，计算划分后数据集的香农熵
#    subDataSet = splitDataSet(dataSet, i, value)
#    prob = len(subDataSet)/float(len(dataSet))
#    newEntorpy += prob*calcShannonEnt(subDataSet)
#   infoGain = baseEntropy-newEntorpy #划分后的数据集，香农熵越小越好，即信息增益越大越好
#   if(infoGain > bestInfoGain):
#    bestInfoGain = infoGain
#    bestFeature = i
#  return bestFeature
#
# #如果数据集已经处理了所有属性，但叶子结点中类标签依然不是唯一的，此时需要决定如何定义该叶子结点。这种情况下，采用多数表决方法，对该叶子结点进行分类
# def majorityCnt(classList): #传入参数：叶子结点中的类标签
#  classCount = {}
#  for vote in classList:
#   if vote not in classCount.keys():
#    classCount[vote] = 0
#    classCount[vote] += 1
#  sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
#  return sortedClassCount[0][0]
#
# #创建树
# def createTree(dataSet, labels): #传入参数：数据集，属性标签（属性标签作用：在输出结果时，决策树的构建更加清晰）
#  classList = [example[-1] for example in dataSet] #数据集样本的类标签
#  if classList.count(classList[0]) == len(classList): #如果数据集样本属于同一类，说明该叶子结点划分完毕
#   return classList[0]
#  if len(dataSet[0]) == 1: #如果数据集样本只有一列（该列是类标签），说明所有属性都划分完毕，则根据多数表决方法，对该叶子结点进行分类
#   return majorityCnt(classList)
#  bestFeat = chooseBestFeatureToSplit(dataSet) #根据香农熵，选择最优的划分方式
#  bestFeatLabel = labels[bestFeat] #记录该属性标签
#  myTree = {bestFeatLabel:{}} #树
#  del(labels[bestFeat]) #在属性标签中删除该属性
#  #根据最优属性构建树
#  featValues = [example[bestFeat] for example in dataSet]
#  uniqueVals = set(featValues)
#  for value in uniqueVals:
#   subLabels = labels[:]
#   subDataSet = splitDataSet(dataSet, bestFeat, value)
#   myTree[bestFeatLabel][value] = createTree(subDataSet, subLabels)
#  return myTree
#
# #测试算法：使用决策树，对待分类样本进行分类
# def classify(inputTree, featLabels, testVec): #传入参数：决策树，属性标签，待分类样本
#  firstStr = list(inputTree.keys())[0] #树根代表的属性
#  secondDict = inputTree[firstStr]
#  featIndex = featLabels.index(firstStr) #树根代表的属性，所在属性标签中的位置，即第几个属性
#  for key in secondDict.keys():
#   if testVec[featIndex] == key:
#    if type(secondDict[key]).__name__ == 'dict':
#     classLabel = classify(secondDict[key], featLabels, testVec)
#    else:
#     classLabel = secondDict[key]
#  return classLabel
#
# def main():
#  X = pd.read_csv("wind1.csv")
#  y=X["ser"].str.split(",",expand=True)
#  y[[0,13]]=y[[13,0]]
#  y=y.values
#  y=y.astype(np.float)
#
#  #dataSet = file2matrix('test_sample.txt', 4)
#  labels = ['1', '2', '3','4','5','6','7','8','9','10','11','12','13']
#  labelsForCreateTree = labels[:]
#  y=np.array(y)
#  Tree = createTree(y, labelsForCreateTree )
#  testvec = [2, 3, 2, 3]
#  testvec1 = [3.00000,14.13000,4.10000,2.74000,24.50000,96.00000,2.05000,0.76000,0.56000,1.35000,9.20000,0.61000,1.60000,560.00000]
#  print (classify(Tree, labels, testvec1))
# if __name__ == '__main__':
#   main()

# class Naive_Bayes:
#     def __init__(self):
#         pass
#
#     # 朴素贝叶斯训练过程
#     def nb_fit(self, X, y):
#         classes = y[y.columns[0]].unique()
#         class_count = y[y.columns[0]].value_counts()
#         # 类先验概率
#         class_prior = class_count / len(y)
#         # 计算类条件概率
#         prior = dict()
#         for col in X.columns:
#             for j in classes:
#                 p_x_y = X[(y == j).values][col].value_counts()
#                 for i in p_x_y.index:
#                     prior[(col, i, j)] = p_x_y[i] / class_count[j]
#
#         return classes, class_prior, prior
#
#     # 预测新的实例
#     def predict(self, X_test):
#         res = []
#         for c in classes:
#             p_y = class_prior[c]
#             p_x_y = 1
#             for i in X_test.items():
#                 p_x_y *= prior[tuple(list(i) + [c])]
#             res.append(p_y * p_x_y)
#         return classes[np.argmax(res)]
#
#
# if __name__ == "__main__":
#     x1 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]
#     x2 = ['S', 'M', 'M', 'S', 'S', 'S', 'M', 'M', 'L', 'L', 'L', 'M', 'M', 'L', 'L']
#     y = [-1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1]
#     df = pd.DataFrame({'x1': x1, 'x2': x2, 'y': y})
#     X = df[['x1', 'x2']]
#     y = df[['y']]
#     X_test = {'x1': 2, 'x2': 'S'}
#
#     nb = Naive_Bayes()
#     classes, class_prior, prior = nb.nb_fit(X, y)
#     print('测试数据预测类别为：', nb.predict(X_test))

# import random
# import math
#
# k = 2
# i = 1
# df = pd.read_csv("mytest.csv")
# data = np.array(df)
# print(df.head(10))
# min1 = data.min(axis=0)
# max1 = data.max(axis=0)
#
# # 在数据最大最小值中随机生成k个初始聚类中心，保存为t
# centre = np.empty((k, 2))
# for i in range(k):
#     centre[i][0] = random.randint(min1[0], max1[0])  # 平时成绩
#     centre[i][1] = random.randint(min1[1], max1[1])  # 期末成绩
#
# while i < 10:
#     print(i)
#     # 计算欧氏距离
#     def euclidean_distance(List, t):
#         return math.sqrt(((List[0] - t[0]) ** 2 + (List[1] - t[1]) ** 2))
#
#
#     # 每个点到每个中心点的距离矩阵
#     dis = np.empty((len(data), k))
#     for i in range(len(data)):
#         for j in range(k):
#             dis[i][j] = euclidean_distance(data[i], centre[j])
#
#     # 初始化分类矩阵
#     classify = []
#     for i in range(k):
#         classify.append([])
#
#     # 比较距离并分类
#     for i in range(len(data)):
#         List = dis[i].tolist()
#         index = List.index(dis[i].min())
#         classify[index].append(i)
#
#     # 构造新的中心点
#     new_centre = np.empty((k, 2))
#     for i in range(len(classify)):
#         if(len(classify[i])>=2):
#             new_centre[i][0] = np.sum(data[classify[i]][0]) / len(classify[i])
#             new_centre[i][1] = np.sum(data[classify[i]][1]) / len(classify[i])
#
#     # 比较新的中心点和旧的中心点是否一样
#     if (new_centre == centre).all():
#         break
#     else:
#         centre = new_centre
#         i = i + 1
#
# # print('迭代次数为：',i)
# print('聚类中心为：', new_centre)
# print('分类情况为：', classify)

import time


def loadDataSet():
    """Return a small hard-coded transaction database for demoing Apriori."""
    transactions = [
        [1, 3, 4],
        [2, 3, 5],
        [1, 2, 3, 5],
        [2, 5],
    ]
    return transactions


def createC1(dataSet):
    """Build the candidate 1-itemsets (C1) from *dataSet*.

    Parameters
    ----------
    dataSet : list
        Transactions; each transaction is an iterable of hashable items.

    Returns
    -------
    list of frozenset
        One singleton frozenset per distinct item, sorted by item value.
        frozensets are used so itemsets can later serve as dict keys.
    """
    # Collect distinct items with a set comprehension: O(total items)
    # instead of the original O(n^2) `[item] in C1` list-membership scan.
    items = {item for transaction in dataSet for item in transaction}
    # Sort the raw items, not the frozensets: frozenset "<" is subset
    # ordering (not a total order), so sorting them directly would not
    # reproduce the original by-value ordering.
    return [frozenset([item]) for item in sorted(items)]


def scanD(D, Ck, minSupport):
    """Score candidate itemsets against dataset *D* and filter by support.

    Parameters
    ----------
    D : list of set
        The transactions.
    Ck : iterable of frozenset
        Candidate itemsets to evaluate.
    minSupport : float
        Minimum fraction of transactions an itemset must occur in.

    Returns
    -------
    (retList, supportData)
        retList : list of frozenset — candidates meeting minSupport,
            in first-encounter order (same order as the original).
        supportData : dict — support of every candidate that occurred at
            least once, including those below the threshold (these are
            needed later for confidence calculations).
    """
    # Count, for each candidate, how many transactions contain it.
    ssCnt = {}
    for tid in D:
        for can in Ck:
            if can.issubset(tid):
                # dict.get replaces the original `if not can in ssCnt`
                # membership test + branch — same counts, idiomatic form.
                ssCnt[can] = ssCnt.get(can, 0) + 1
    numItems = float(len(D))
    retList = []
    supportData = {}
    for key, cnt in ssCnt.items():
        support = cnt / numItems
        if support >= minSupport:
            retList.append(key)
        supportData[key] = support
    return retList, supportData


# def aprioriGen(Lk, k):
#     retList = []
#     lenLk = len(Lk)
#     for i in range(lenLk):
#         for j in range(i+1, lenLk):
#             L1 = list(Lk[i])[:k-2]
#             L2 = list(Lk[j])[:k-2]
#             L1.sort()
#             L2.sort()
#             if L1 == L2:
#                 retList.append(Lk[i]|Lk[j])
#     return retList

def aprioriGen(Lk, k):
    """Generate candidate k-itemsets by unioning pairs of itemsets in *Lk*.

    Only unions of size exactly k are kept. A dict is used as an ordered
    set: duplicates are dropped while first-seen order is preserved.
    """
    seen = {}
    for idx, first in enumerate(Lk):
        for second in Lk[idx + 1:]:
            candidate = first | second
            if len(candidate) == k and candidate not in seen:
                seen[candidate] = 1
    return list(seen)


def apriori(dataSet, minSupport=0.5):
    """Run the Apriori algorithm over *dataSet*.

    Returns (L, supportData): L[i] holds the frequent (i+1)-itemsets
    (the final entry is an empty list, marking termination), and
    supportData maps every scored candidate to its support.
    """
    candidates1 = createC1(dataSet)
    transactions = list(map(set, dataSet))
    frequent1, supportData = scanD(transactions, candidates1, minSupport)
    L = [frequent1]
    k = 2
    # Keep growing itemsets until no frequent (k-1)-itemsets remain.
    while L[-1]:
        Ck = aprioriGen(L[-1], k)                        # candidate k-itemsets
        Lk, supK = scanD(transactions, Ck, minSupport)   # prune by support
        supportData.update(supK)
        L.append(Lk)
        k += 1
    return L, supportData


def calcConf(freqSet, H, supportData, br1, minConf=0.7):
    """Score candidate rules (freqSet - conseq) --> conseq against *minConf*.

    Rules meeting the confidence threshold are printed and appended to
    *br1* as (antecedent, consequent, confidence) tuples. The consequents
    of rules that fail are returned so callers can prune their supersets.
    """
    prunedH = []  # consequents whose rules failed the confidence test
    for conseq in H:
        antecedent = freqSet - conseq
        # confidence(A --> B) = support(A | B) / support(A)
        conf = supportData[freqSet] / supportData[antecedent]
        if conf < minConf:
            prunedH.append(conseq)
        else:
            print(antecedent, "-->", conseq, "\tconf:", conf)
            br1.append((antecedent, conseq, conf))
    return prunedH


# def rulesFromConseq(freqSet, H, supportData, br1, minConf=0.7):  # 原版Apriori原理来减少创造的规则
#     m = len(H[0])
#     if (len(freqSet)>(m+1)):
#         Hmp1 = aprioriGen(H, m+1)
#         Hmp1 = calcConf(freqSet, Hmp1, supportData, br1, minConf)
#         if (len(Hmp1) >1):
#             rulesFromConseq(freqSet, Hmp1, supportData, br1,minConf)

def rulesFromConseq(freqSet, H, supportData, br1, minConf=0.7):  # rule generation with the improved Apriori-principle pruning
    """Generate association rules from the frequent itemset *freqSet*.

    Consequents (rule right-hand sides) are grown from one element upward.
    Per the Apriori principle, when a consequent fails the confidence test,
    items are removed from the pool used to assemble larger consequents.
    Accepted rules are printed and appended to *br1* by calcConf.
    """
    is_find = True  # loop flag
    m = 1  # start with single-element consequents
    Hmp1 = H  # H is the frequent itemset split into singletons, e.g. [frozenset({2}), frozenset({3}), frozenset({5})]; Hmp1 holds the assembled consequents. Our aprioriGen cannot build 1-element sets, so for m == 1 we use H directly and only call aprioriGen once consequents have 2+ elements.
    while is_find:
        if len(freqSet) > m:  # a consequent has at most len(freqSet)-1 elements, so the loop runs at most len(freqSet)-1 times
            if m > 1:  # the adapted aprioriGen() produces C2 and larger, never C1, hence this guard
                Hmp1 = aprioriGen(H, m)  # combine items of H into consequents of size m
            H_no = calcConf(freqSet, Hmp1, supportData, br1, minConf)  # keep rules meeting minConf; collect the consequents that failed
            if len(H_no) != 0:  # some consequents failed the confidence test
                H_no = list(set(frozenset([item]) for row in Hmp1 for item in
                                row))  # split the consequents into single items, e.g. [{2,3},{3,4}] -> [{2},{3},{4}], so they can be subtracted below — the original author's stated intent is to drop failing consequents and their supersets (Apriori principle)
                # NOTE(review): this splits EVERY consequent in Hmp1, not just the
                # failing ones collected in H_no — confirm this matches the intended pruning.
                H = list(set(H) - set(H_no))  # remove those items from the combinable pool
            m = m + 1  # next pass builds consequents one element larger
            if len(H) < m:  # too few items left to build a size-m consequent (e.g. 2 items cannot form a C3), so stop
                is_find = False
        else:  # consequent size limit reached — stop
            is_find = False


def generateRules(L, supportData, minConf=0.7):
    """Produce association rules from every frequent itemset of size >= 2.

    Accepted rules are collected as (antecedent, consequent, confidence)
    tuples and returned.
    """
    bigRuleList = []
    # L[0] holds 1-itemsets, which cannot form a rule, so start at L[1].
    for tier, freqSets in enumerate(L):
        if tier < 1:
            continue
        for freqSet in freqSets:
            singletons = [frozenset([item]) for item in freqSet]
            if tier == 1:
                # 2-itemsets: test every single-item consequent directly.
                calcConf(freqSet, singletons, supportData, bigRuleList, minConf)
            else:
                # 3-itemsets and larger: prune with the Apriori principle.
                rulesFromConseq(freqSet, singletons, supportData, bigRuleList, minConf)
    return bigRuleList


if __name__ == "__main__":
    # Demo: mine frequent itemsets and rules from the toy dataset, timing the run.
    transactions = loadDataSet()
    started = time.time()
    freq_tiers, supp_data = apriori(transactions)

    rules = generateRules(freq_tiers, supp_data, minConf=0.5)
    finished = time.time()
    print("程序花费时间{}秒".format(finished - started))
    # print(freq_tiers)
    # print(supp_data)