'''
# MultinomialNB demo: fit a multinomial naive Bayes model on random count data.
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
import numpy as np

# 60 samples, 5 integer "count" features, 5 classes.
features = np.random.randint(5, size=(60, 5))
labels = np.random.randint(5, size=(60,))

model = MultinomialNB(alpha=1.0e-10, class_prior=None, fit_prior=True)
model.fit(features, labels)

# class_log_prior_ is stored as log-probabilities; exponentiate for display.
print("类先验概率：", np.exp(model.class_log_prior_))
print("每个标签下包含的样本数：", model.class_count_)
print("预测的准确率：", accuracy_score(labels, model.predict(features)))
print("预测的分类：", model.predict(features))


# Solve logistic regression with scikit-learn on the iris dataset.
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
classifier = LogisticRegression(random_state=0, solver='lbfgs')
classifier.fit(X, y)

print("预测前两个样本的分类结果为：", classifier.predict(X[:2, :]))
print("预测前两个样本的分类概率为：", classifier.predict_proba(X[:2, :]))
print("预测的准确率为：", classifier.score(X, y))


# Function limits: one-sided limits of sin(x)/|x| at x = 0.
from sympy import limit, var, sin

x = var('x')
s = sin(x) / abs(x)  # abs() dispatches to sympy's Abs via __abs__
lim_from_right = limit(s, x, 0, '+')
lim_from_left = limit(s, x, 0, '-')
print(lim_from_right, lim_from_left)


# Sequence limit: (1 + 1/n)**n -> e as n -> oo.
# FIX: import oo explicitly — the original relied on oo having been imported
# by an earlier, unrelated section.
from sympy import symbols, limit, oo

n = symbols('n', integer=True)
s = (1 + 1/n) ** n
t1 = limit(s, n, oo)
print(t1)  # E

from sympy import symbols, limit, sin, pi, oo

# Important: for integer n, sin(pi*n) is identically 0, so the limit is 0.
n = symbols('n', integer=True)
s = sin(pi * n)
# BUG FIX: the limit variable must be n — the original passed x, the symbol
# from a previous section, so the limit was taken over the wrong variable.
t1 = limit(s, n, oo)
print(t1)  # 0

from sympy import limit, symbols, sin, pi, oo

# lim n^2 * (1 - n*sin(1/n)) as n -> oo.
# Taylor expansion: n*sin(1/n) = 1 - 1/(6n^2) + O(1/n^4), so the limit is 1/6.
n = symbols('n', integer=True)
s = limit(n**2 * (1 - n * sin(1/n)), n, oo)
# FIX: removed the discarded s.doit() call — limit() already returns the
# evaluated result, and the call's return value was ignored anyway.
print(s)  # 1/6

# Solve an ordinary differential equation with dsolve: y'' - 2y' + y = sin(x)
from sympy import symbols, Eq, dsolve, sin, Function

x = symbols('x')
y = Function('y')(x)
ode = Eq(y.diff(x, 2) - 2 * y.diff(x) + y, sin(x))
# dsolve with no initial conditions returns the general solution.
general_solution = dsolve(ode, y)
print(general_solution)

# Plot a 3-D parametric curve (a helix).
# BUG FIX: cos was never imported anywhere in this script -> NameError.
from sympy import symbols, sin, cos
from sympy.plotting import plot3d_parametric_line

u = symbols('u')
plot3d_parametric_line(cos(u), sin(u), u, (u, -10, 10))

# Plot a 3-D surface (a paraboloid).
from sympy.plotting import plot3d

x, y = symbols('x y')
plot3d(x**2 + y**2, (x, -5, 5), (y, -5, 5))

# -- Disabled draft of the LDA train/test visualization -------------------
# A clean, working version of this script follows below. The draft is kept
# for reference but fully commented out, because the original left several
# lines live: they referenced `lda` before it was defined (NameError) and
# contained indented statements outside any loop (SyntaxError), which would
# have killed the whole script.
# import numpy as np
# import matplotlib.pyplot as plt
# from sklearn import datasets
# from sklearn.model_selection import train_test_split
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# iris = datasets.load_iris()
# X = iris.data
# y = iris.target
# target_names = iris.target_names
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# # Reduce the 4-D features to 1-D
# lda = LinearDiscriminantAnalysis(n_components=1)
# X_r2 = lda.fit(X, y).transform(X)
# X_Zreo = np.zeros(X_r2.shape)
# X_train_r2 = lda.fit(X_train, y_train).transform(X_train)
# X_test_r2 = lda.transform(X_test)
# X_Zreo_train = np.zeros(X_train_r2.shape)
# X_Zreo_test = np.zeros(X_test_r2.shape)
# for c, j, target_name in zip("rgb", [0, 1, 2], target_names):
#     plt.scatter(X_r2[y == j], X_Zreo[y == j], c=c, label=target_name)
#     plt.scatter(X_train_r2[y_train == j], X_Zreo_train[y_train == j], c=c, label=f"{target_name} (Train)")
#     plt.scatter(X_test_r2[y_test == j], X_Zreo_test[y_test == j], c=c, label=f"{target_name} (Test)", marker='x')
# plt.grid()
# plt.legend()
# plt.show()


import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split  # train/test split helper

# Project the 4-D iris features onto a single LDA axis and scatter the
# train and test points along that axis (vertical coordinate fixed at 0).
iris = datasets.load_iris()
X, y, target_names = iris.data, iris.target, iris.target_names

# Hold out 30% of the samples as a test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Reduce four dimensions to one.
lda = LinearDiscriminantAnalysis(n_components=1)
X_train_1d = lda.fit(X_train, y_train).transform(X_train)
X_test_1d = lda.transform(X_test)

zeros_train = np.zeros(X_train_1d.shape)
zeros_test = np.zeros(X_test_1d.shape)

for color, class_idx, target_name in zip("rgb", range(3), target_names):
    train_mask = y_train == class_idx
    test_mask = y_test == class_idx
    plt.scatter(X_train_1d[train_mask], zeros_train[train_mask], c=color, label=f"{target_name} (Train)")
    plt.scatter(X_test_1d[test_mask], zeros_test[test_mask], c=color, label=f"{target_name} (Test)", marker='x')

plt.grid()
plt.legend()
plt.show()
'''
