import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split


# Load the UCI agaricus-lepiota (mushroom) data set and name its columns.
# NOTE(review): 'F:agaricus-lepiota.data' is a drive-relative Windows path
# (no separator after 'F:') — it resolves against the current directory of
# drive F:. Confirm this is the intended location.
data = pd.read_csv('F:agaricus-lepiota.data', header=None)
# 23 columns: the class label ("classes", 'e' edible / 'p' poisonous, as used
# by the analysis below) followed by 22 categorical morphological attributes.
columns = ["classes", "cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
           "gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
           "stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type", "veil-color",
           "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
data.columns = columns



# Cap color ('cap-color'): bar chart of how many mushrooms show each color.

import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import numpy as np

# Frequency of every cap color, most common first.
cap_colors = data['cap-color'].value_counts()
m_height = cap_colors.values.tolist()
cap_color_labels = cap_colors.index.tolist()

print(m_height)
print(cap_color_labels)

def autolabel(rects, ax, fontsize=14):
    """Annotate each bar in *rects* with its integer height on axes *ax*."""
    for bar in rects:
        bar_height = bar.get_height()
        midpoint = bar.get_x() + bar.get_width() / 2
        ax.text(midpoint, bar_height, '%d' % int(bar_height),
                ha='center', va='bottom', fontsize=fontsize)

ind = np.arange(len(m_height))  # one x position per observed color
width = 0.7

colors = ['#DEB887', '#778899', '#DC143C', '#FFFF99', '#f8f8ff', '#F0DC82', '#FF69B4', '#D22D1E', '#C000C5', 'g']

fig, ax = plt.subplots(figsize=(10, 7))
cap_colors_bars = ax.bar(ind, m_height, width, color=colors)

ax.set_xlabel("Cap Color", fontsize=20)
ax.set_ylabel('Quantity', fontsize=20)
ax.set_title('Mushroom Cap Color Quantity', fontsize=22)
ax.set_xticks(ind)
ax.set_xticklabels(cap_color_labels, fontsize=12)  # labels come straight from value_counts

autolabel(cap_colors_bars, ax)
plt.show()


import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# Grouped bar chart: edible vs. poisonous mushroom counts for each cap color.
cap_colors = data['cap-color'].value_counts() # count occurrences of each cap color
m_height = cap_colors.values.tolist()  
cap_colors.axes  # NOTE(review): bare expression, result discarded — has no effect
cap_color_labels = cap_colors.axes[0].tolist()  
print(m_height)
print(cap_color_labels)
def autolabel(rects,fontsize=14):
    # Annotate each bar with its integer height.
    # NOTE(review): unlike the earlier autolabel, this one reads the
    # module-level `ax` at call time (the figure created below).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2, 1*height,'%d' % int(height),
                ha='center', va='bottom',fontsize=fontsize)

ind = np.arange(10)  # NOTE(review): hard-codes 10 cap-color categories
width = 0.7   
# Two parallel lists: poisonous counts and edible counts per cap color.
poisonous_cc = []
edible_cc = []

for capColor in cap_color_labels:
    size = len(data[data['cap-color'] == capColor].index)  # total mushrooms of this color
    edibles = len(data[(data['cap-color'] == capColor) & (data['classes'] == 'e')].index)  # edible ones of this color
    edible_cc.append(edibles)
    poisonous_cc.append(size - edibles)   
print(edible_cc)
print(poisonous_cc)

width = 0.4
fig, ax = plt.subplots(figsize=(14, 8))
edible_bars = ax.bar(ind, edible_cc, width, color='#FFB90F')  # bars for edible mushrooms
# Poisonous bars sit `width` units to the right of the edible ones.
poison_bars = ax.bar(ind + width, poisonous_cc, width, color='#4A708B')

ax.set_xlabel("Cap Color", fontsize=20)
ax.set_ylabel('Quantity', fontsize=20)
ax.set_title('Edible and Poisonous Mushrooms Based on Cap Color', fontsize=22)
ax.set_xticks(ind + width / 2)
# NOTE(review): these fixed names assume value_counts() returns the colors in
# exactly this order — verify against the printed cap_color_labels.
ax.set_xticklabels(('brown', 'gray', 'red', 'yellow', 'white', 'buff', 'pink', 'cinnamon', 'purple', 'green'),
                   fontsize=12)
ax.legend((edible_bars, poison_bars), ('edible', 'poisonous'), fontsize=17)
autolabel(edible_bars, 10)
autolabel(poison_bars, 10)
plt.show()

# Odor ('odor'): bar chart of how many mushrooms have each odor value.

import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import numpy as np

# Frequency of every odor value, most common first.
cap_colors = data['odor'].value_counts()
m_height = cap_colors.values.tolist()
cap_color_labels = cap_colors.index.tolist()

print(m_height)
print(cap_color_labels)

def autolabel(rects, ax, fontsize=14):
    """Annotate each bar in *rects* with its integer height on axes *ax*."""
    for bar in rects:
        bar_height = bar.get_height()
        midpoint = bar.get_x() + bar.get_width() / 2
        ax.text(midpoint, bar_height, '%d' % int(bar_height),
                ha='center', va='bottom', fontsize=fontsize)

ind = np.arange(len(m_height))  # one x position per observed odor
width = 0.7

colors = ['#DEB887', '#778899', '#DC143C', '#FFFF99', '#f8f8ff', '#F0DC82', '#FF69B4', '#D22D1E', '#C000C5', 'g']

fig, ax = plt.subplots(figsize=(10, 7))
cap_colors_bars = ax.bar(ind, m_height, width, color=colors)

ax.set_xlabel("odor", fontsize=20)
ax.set_ylabel('Quantity', fontsize=20)
ax.set_title('Mushroom odor Quantity', fontsize=22)
ax.set_xticks(ind)
ax.set_xticklabels(cap_color_labels, fontsize=12)  # labels come straight from value_counts

autolabel(cap_colors_bars, ax)
plt.show()


import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# Grouped bar chart: edible vs. poisonous mushroom counts for each odor.
cap_colors = data['odor'].value_counts() # count occurrences of each odor value
m_height = cap_colors.values.tolist()  
cap_colors.axes  # NOTE(review): bare expression, result discarded — has no effect
cap_color_labels = cap_colors.axes[0].tolist()  
print(m_height)
print(cap_color_labels)
def autolabel(rects,fontsize=14):
    # Annotate each bar with its integer height.
    # NOTE(review): reads the module-level `ax` at call time (the figure
    # created below), not a parameter.
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2, 1*height,'%d' % int(height),
                ha='center', va='bottom',fontsize=fontsize)

ind = np.arange(9)  # NOTE(review): hard-codes 9 odor categories
width = 0.7   
# Two parallel lists: poisonous counts and edible counts per odor value.
poisonous_cc = []
edible_cc = []

for capColor in cap_color_labels:
    size = len(data[data['odor'] == capColor].index)  # total mushrooms with this odor
    edibles = len(data[(data['odor'] == capColor) & (data['classes'] == 'e')].index)  # edible ones with this odor
    edible_cc.append(edibles)
    poisonous_cc.append(size - edibles)   
print(edible_cc)
print(poisonous_cc)

width = 0.4
fig, ax = plt.subplots(figsize=(14, 8))
edible_bars = ax.bar(ind, edible_cc, width, color='#FFB90F')  # bars for edible mushrooms
# Poisonous bars sit `width` units to the right of the edible ones.
poison_bars = ax.bar(ind + width, poisonous_cc, width, color='#4A708B')

ax.set_xlabel("odor", fontsize=20)
ax.set_ylabel('Quantity', fontsize=20)
ax.set_title('Edible and Poisonous Mushrooms Based on odor', fontsize=22)
ax.set_xticks(ind + width / 2)
# NOTE(review): these fixed names assume value_counts() returns the odors in
# exactly this order — verify against the printed cap_color_labels.
ax.set_xticklabels(('almond', 'anise', 'creosote', 'fishy', 'foul', 'musty', 'none', 'pungent', 'spicy'),
                   fontsize=12)
ax.legend((edible_bars, poison_bars), ('edible', 'poisonous'), fontsize=17)
autolabel(edible_bars, 10)
autolabel(poison_bars, 10)
plt.show()


def analysis_poison(data, index_name):
    """Return the fraction of poisonous mushrooms for each level of *index_name*.

    Parameters:
        data: DataFrame holding a "classes" column ('p' poisonous / 'e' edible)
            and the *index_name* column.
        index_name: name of the categorical column to group by.

    Returns:
        Series indexed by the levels of *index_name*, valued in [0, 1].

    Fixes vs. the original: no longer mutates the caller's frame via
    ``replace(..., inplace=True)``, and uses the Series method
    ``value_counts()`` instead of the deprecated top-level ``pd.value_counts``.
    """
    poison_flag = data["classes"].replace({"p": 1, "e": 0})  # 1 = poisonous
    return poison_flag.groupby(data[index_name]).sum() / data[index_name].value_counts()

# pd.value_counts(a)
# analysis_poison(dataset[["class","cap-color"]],"cap-color")    
# For every attribute, plot the poisonous fraction per category, and collect
# into `danger` the categories whose poisonous fraction exceeds 0.75.
plt.close()
plt.figure(figsize=(16, 30))
i = 1
danger = []
for index_name in data.columns[1:]:
    # Pass a two-column slice so the helper sees only [classes, attribute].
    result = analysis_poison(data[["classes", index_name]], index_name)
    ax = plt.subplot(6, 4, i)  # 6x4 grid: one subplot per attribute
    ax.set_title(index_name)
    result.plot(kind="bar", color='#778899')
    temp = result[result > 0.75]  # categories that are >75% poisonous
    # Prefix each category with its attribute name, e.g. "odor:f".
    temp = temp.rename(index=lambda x: ":".join([index_name, x]))
    danger.append(temp)
    # plt.bar(range(len(result)),result.data)
    i += 1
plt.show()

# Replace missing values: turn '?' markers into NaN, then forward-fill from
# the previous row.
# BUG FIX: the original chained call
#     data.replace('?', np.nan).fillna(method='pad', inplace=True)
# filled a temporary copy and threw it away — `data` was never changed.
data.replace('?', np.nan, inplace=True)
data.fillna(method='pad', inplace=True)

# Integer-encode the label and the binary (fewer than three level) columns.
labelencoder = LabelEncoder()
for col in ("classes", "bruises", "gill-size", "stalk-shape", "veil-type"):
    data[col] = labelencoder.fit_transform(data[col])

# One-hot encode the remaining multi-valued categorical attributes; each
# category level becomes a new dense 0/1 column named "<col>_<level index>".
a = ["cap-shape", "cap-surface", "cap-color", "odor", "gill-attachment",
     "gill-spacing", "gill-color", "stalk-root", "stalk-surface-above-ring",
     "stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring",
     "veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
# NOTE(review): `sparse=False` was renamed `sparse_output` in scikit-learn 1.2
# and removed in 1.4 — confirm the installed version accepts it.
enc = OneHotEncoder(sparse=False)
for col in a:
    # fit_transform replaces the original separate fit()/transform() calls;
    # the unused `attr_list` accumulator was dropped.
    result = enc.fit_transform(data[[col]])
    for j in range(result.shape[1]):
        data[col + '_' + str(j)] = result[:, j]

# print(data.head())
# print(data.columns)

# NOTE(review): this loop one-hot encodes the SAME columns a second time,
# writing a duplicate set of dummy columns named "<col><j>" (no underscore)
# next to the "<col>_<j>" columns created above — every category level ends
# up in the feature matrix twice. It looks like a leftover from an earlier
# revision; consider keeping only one of the two encoding loops.
for i in a:
    A = data.loc[:, [i]].values.tolist()  # column as a list of 1-element lists
    enc.fit(A)
    result = enc.transform(A)
    nattr = len(result[0])  # number of category levels for this column
    attr_list = []
    for j in range(nattr):
        data.loc[:, str(i) + str(j)] = 0  # pre-create the dummy column
        attr_list.append(str(i) + str(j))
    data.loc[:, attr_list] = result  # fill all dummies for this column at once

# Separate the label from the features, then make the train/test partition.

Y = data["classes"]
data.drop("classes", axis=1, inplace=True)
# Discard the original categorical columns; the encoded dummies remain.
X = data.drop(columns=a)

# Hold out a third of the samples; fixed seed for reproducibility.
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.33, random_state=4)

from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score


# #决策树
from sklearn.model_selection import GridSearchCV
from sklearn import tree

# def decision_tree(trainX, trainY, testX, testY):
#     # start = time.time()
#     param = {'criterion': ['entropy'], 'max_depth': list(range(7, 12)), 'min_samples_split': list(range(8, 12))}
#     grid = GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param, cv=5)  # 寻找最优参数
#     grid.fit(trainX, trainY)
#     print('最优分类器:', grid.best_params_, '最优分数:', grid.best_score_)  # 最优参数下1次交叉验证的结果
#     # start2 = time.time()
#     clf = tree.DecisionTreeClassifier(**grid.best_params_)
#     clf.fit(trainX, trainY)
#     score = clf.score(testX, testY)
#     print("Test score on test data:", score)

# decision_tree(trainX, trainY, testX, testY)

# Re-indented with 4 spaces — the original body used tabs, inconsistent with
# the rest of the file (and a SyntaxError risk if ever mixed with spaces).
def decision_tree(trainX, trainY, testX, testY):
    """Grid-search a decision tree and report its performance.

    Searches entropy trees over max_depth 7..11 and min_samples_split 8..11
    with 5-fold cross-validation, refits the best configuration on the full
    training set, prints the best parameters / CV score and the test
    accuracy, and returns the test accuracy.
    """
    param = {'criterion': ['entropy'], 'max_depth': list(range(7, 12)), 'min_samples_split': list(range(8, 12))}
    grid = GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param, cv=5)  # search for the best parameters
    grid.fit(trainX, trainY)
    print('最优分类器:', grid.best_params_, '最优分数:', grid.best_score_)
    clf = tree.DecisionTreeClassifier(**grid.best_params_)
    clf.fit(trainX, trainY)
    score = clf.score(testX, testY)
    print("Test score on test data:", score)
    return score  # was implicitly None; returning lets callers reuse the accuracy

decision_tree(trainX, trainY, testX, testY)

# Gaussian naive Bayes via scikit-learn.
def naivebayes2(trainX, trainY, testX, testY):
    """Standardize the features, fit GaussianNB, print and return test accuracy."""
    normalizer = StandardScaler()
    normalizer.fit(trainX)
    scaled_train = normalizer.transform(trainX)
    scaled_test = normalizer.transform(testX)
    model = GaussianNB()
    model.fit(scaled_train, trainY)
    pred = model.predict(scaled_test)
    accuracy = accuracy_score(pred, testY)
    print("accuracy of GaussianNB:", accuracy)
    return accuracy

#naivebayes2(trainX, trainY, testX, testY)


#神经网络
from sklearn.model_selection import GridSearchCV

from sklearn.neural_network import MLPClassifier

def neural_network():
    """Grid-search an MLP classifier and report its performance.

    NOTE(review): takes no parameters — it reads the module-level
    trainX/trainY/testX/testY produced by train_test_split above.
    Prints the best hidden-layer configuration and the test score;
    returns the best cross-validation score.
    """
    scaler = StandardScaler()  # standardize features before training
    scaler.fit(trainX)
    trainX1 = scaler.transform(trainX)
    param = {'hidden_layer_sizes': [(20, 10), (10, 5)]}
    grid = GridSearchCV(MLPClassifier(), param_grid=param, cv=5)
    grid.fit(trainX1, trainY)
    print('最优分类器:', grid.best_params_, '最优分数:', grid.best_score_)
    testX1 = scaler.transform(testX)  # apply the same scaling to the test set
    print("Score of NN is", grid.score(testX1, testY))
    return grid.best_score_

#neural_network()



#SVM支持向量机

import numpy as np  
import math  
from sklearn.preprocessing import StandardScaler  
from sklearn.svm import SVC  
  
# Estimate an RBF-kernel gamma from the spread of pairwise squared distances.
def calGamma(X):
    """Heuristic gamma for an RBF kernel.

    Shuffles the row indices, computes squared Euclidean distances between
    two small random groups of samples (each about 1% of the data), and
    derives gamma from the ~10th and ~90th percentile of those distances.
    """
    n = X.shape[0]
    order = np.array(range(n))
    np.random.shuffle(order)  # random sample of row indices
    lo = round(n * 0.01)
    hi = round(n * 0.02)
    sq_dists = []
    for p in range(1, lo):
        for q in range(lo + 1, hi):
            delta = X[order[p], :] - X[order[q], :]
            sq_dists.append(np.sum(delta ** 2))
    sq_dists.sort()
    count = len(sq_dists)
    near = sq_dists[round((count - 1) * 0.1)]  # ~10th percentile distance
    far = sq_dists[round((count - 1) * 0.9)]   # ~90th percentile distance
    # Algebraically (ln(far) - ln(near)) / (far - near).
    gamma = 1 / (2 * (far - near) / (2 * (math.log(far) - math.log(near))))
    return gamma
  
def SVM(trainX, trainY, testX, testY):
    """Train an RBF-kernel SVC on standardized features.

    gamma is estimated from the standardized training data by calGamma;
    C=45 is the original hand-tuned value. Prints and returns the test-set
    accuracy.

    Fix: the original standardized and scored the test set twice (once for
    the print and once for the return); it is now computed a single time.
    """
    scaler = StandardScaler()  # fit standardization on the training set only
    scaler.fit(trainX)
    trainX3 = scaler.transform(trainX)
    gamma = calGamma(trainX3)  # data-driven kernel width estimate

    model = SVC(C=45, kernel='rbf', gamma=gamma)
    model.fit(trainX3, trainY)

    score = model.score(scaler.transform(testX), testY)
    print("Score of the svm on the test data:", score)
    return score

SVM(trainX, trainY, testX, testY)

# # K倍交叉验证法  
  
# from sklearn.model_selection import KFold  
# import matplotlib.pyplot as plt  
  
# def KF(data2, k=10):  
#     """  
#     执行K倍交叉验证，返回训练集和测试集的特征和标签  
  
#     参数:  
#     data2 -- 一个包含两个元素的元组：(features_df, labels_series)  
#     k -- 交叉验证的折数，默认为10  
  
#     返回:  
#     train_X, train_Y, test_X, test_Y -- 列表形式的训练集和测试集的特征和标签  
#     """  
#     kf = KFold(n_splits=k)  
  
#     train_X = []  
#     train_Y = []  
#     test_X = []  
#     test_Y = []  
  
#     for train_index, test_index in kf.split(data2[0]):  
#         train_X.append(data2[0].iloc[train_index, :])  
#         train_Y.extend(data2[1].iloc[train_index])  # 使用extend来合并列表  
#         test_X.append(data2[0].iloc[test_index, :])  
#         test_Y.extend(data2[1].iloc[test_index])  # 使用extend来合并列表  
  
#     return train_X, train_Y, test_X, test_Y  
  
# def kflod():  
#     """  
#     使用K倍交叉验证法评估不同算法的性能  
  
#     注意：这个函数假设'KF'函数已经返回了正确的数据，并且你已经有了一个方法来评估每种算法的性能（例如，通过准确率）  
#     """  
#     # 假设data2已经定义，并且是一个包含特征和标签的元组  
#     kf_data = KF(data2)  # 假设data2已经定义  
  
#     clf_names = ['decision_tree', 'naivebayes2', 'neural_network', 'SVM']  
#     fig = plt.figure()  
  
#     for i, clf_name in enumerate(clf_names):  
#         scores = []  
#         ax = fig.add_subplot(2, 2, i+1)  
  
#         # 这里应该有一个评估函数来评估每种算法的性能，但这里仅作演示  
#         # 注意：实际使用时，你需要定义评估函数并传入正确的参数  
#         for j in range(len(kf_data[0])):  
#             # 假设evaluate_clf是一个函数，返回每种算法的分数  
#             # score = evaluate_clf(clf_name, kf_data[0][j], kf_data[1][j], kf_data[2][j], kf_data[3][j])  
#             score = j  # 只是一个模拟的分数  
#             scores.append(score)  
  
#         ax.boxplot(scores)  
#         ax.set_title(clf_name)  
  
# # 注意：你需要定义data2和评估函数来使用kflod()  
# # 例如：  
# data2 = (pd.DataFrame(X), pd.Series(Y))  
# kflod()