from pyexpat import features

import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import degree
from sklearn.linear_model import LinearRegression, LogisticRegression


# Plot a straight line y = w*x + b for given w, b values.
def test1():
    """Draw the line y = w*x + b (w=3, b=6) as both scatter and line plots."""
    print("111")
    w, b = 3, 6
    # 11 evenly spaced sample points on [0, 10]
    x = np.linspace(0, 10, 11)
    print(x, type(x))
    # Fix: use the w and b variables instead of repeating the constants 3 and 6,
    # so changing w/b above actually changes the plotted line.
    y = w * x + b
    print(y)
    plt.title("xx")
    plt.scatter(x, y, c='r')
    plt.plot(x, y, c='b')
    plt.show()


# https://gitee.com/xieweig/pytorching
# Plot the sigmoid activation function y = 1 / (1 + e^(-x)).
def test2():
    """Evaluate and plot sigmoid on 21 points over [-10, 10]."""
    x = np.linspace(-10, 10, 21)
    print(x)
    # np.exp is the idiomatic (and numerically robust) way to compute e**(-x),
    # rather than raising the np.e constant to a power.
    y = 1 / (1 + np.exp(-x))
    print(np.e)
    print(y)

    # 'bx' = blue x markers
    plt.plot(x, y, 'bx')
    plt.show()


def test3():
    """Fit a LinearRegression to noisy samples of y = 3*x + 5 and plot the fit."""
    np.random.seed(100)
    xs = np.linspace(-10, 10, 21)
    # sklearn expects a 2-D feature matrix: one column, one row per sample.
    X = xs.reshape(-1, 1)
    noise = np.random.randn(xs.size)
    print(noise)
    print(xs)
    print(X)
    # Ground-truth line and its noisy observations (noise scaled by 5).
    y_true = 3 * xs + 5
    y = 3 * xs + 5 + noise * 5
    plt.scatter(xs, y)
    plt.plot(xs, y_true, 'r', label="y_true=3*x+5")

    # Train the model on the full sample set.
    model = LinearRegression()
    model.fit(X, y)
    print(model.coef_, model.intercept_)
    w = model.coef_[0]
    b = model.intercept_
    y_predict = w * xs + b
    plt.plot(xs, y_predict, 'g', label=f'y_pred={w}*x+{b}')

    # Score (R^2) on a small hand-picked test set.
    X_test = np.array([-7.3, -0.1, 2.7]).reshape(-1, 1)
    y_test = np.array([-20, 0, 16])
    s = model.score(X_test, y_test)
    print("测试分数", s)

    plt.legend()
    plt.show()
    '''pip install scikit-learn -i https://pypi.tuna.tsinghua.edu.cn/simple'''


def test4():
    """Binary-classification demo: fit LogisticRegression on eight 2-D points,
    draw the decision boundary, then validate and predict a few new points."""
    # Step 1: prepare the data set — 4 positive and 4 negative 2-D points.
    features = np.array([
        [3, 7], [5, 4], [6, 11], [4, 8],
        [-6, -3], [-5, -14], [-7, -22], [-11, -9],
    ])
    label = np.array([1, 1, 1, 1, 0, 0, 0, 0])
    print(features)
    x = features[:, 0]
    print(x)
    y = features[:, 1]

    plt.scatter(x, y, c='r')
    # Logistic regression is fundamentally a classifier (simplest case: binary).
    # It combines the line w1*x + w2*y + b = 0 with the sigmoid function;
    # solving the boundary for y gives y = -(w1*x + b) / w2.
    # Step 2: train the model.
    model = LogisticRegression()
    model.fit(features, label)
    print(model.coef_, )
    print(model.intercept_)
    w1, w2 = model.coef_[0]
    b = model.intercept_[0]
    boundary_x = np.linspace(-10, 10, 21)
    boundary_y = -(w1 * boundary_x + b) / w2
    plt.plot(boundary_x, boundary_y, 'r')

    # Step 3: validate the model on a small held-out set.
    X_val = np.array([
        [5, 8],
        [-6, -1],
    ])
    y_val = np.array([1, 0])
    r = model.score(X_val, y_val)
    print('r ', r)
    # Step 4: predict class probabilities for some new points.
    X_pred = np.array([
        [1, 9],
        [-8, 2],
        [-4, 4],
    ])
    y_pred = model.predict_proba(X_pred)
    plt.scatter(X_pred[:, 0], X_pred[:, 1], c='b')
    print(y_pred)
    plt.show()


#     Build a neural network with torch
import torch
from torch import nn


# neural network: the smallest possible model — one linear neuron.
class MyLinearNet(nn.Module):
    """A one-neuron linear model y = w*x + b (1 input feature, 1 output)."""

    def __init__(self) -> None:
        super().__init__()
        # Wrapped in Sequential so extra layers can be stacked in later.
        self.layer = nn.Sequential(nn.Linear(1, 1))

    def forward(self, x):
        # Delegate straight to the (single-layer) pipeline.
        return self.layer(x)


def test6():
    """Train MyLinearNet with SGD to fit noisy samples of y = 3*x + 5,
    animating the regression line after every optimization step."""
    torch.manual_seed(888)
    x = torch.linspace(0, 10, 101)
    y_true = 3 * x + 5

    # Gaussian noise (std ≈ 2) added to the ground-truth line.
    noise = torch.randn(y_true.size()) * 2
    y = y_true + noise

    # Reshape to (n_samples, 1) column vectors, as nn.Linear expects.
    X = x.reshape(-1, 1)
    Y = y.reshape(-1, 1)
    print(X, X.dtype)
    print(Y, Y.dtype)

    model = MyLinearNet()

    lossfunc = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    # Interactive mode so the figure refreshes inside the loop.
    plt.ion()

    for i in range(100):
        # Forward pass: model(X) goes through Python's __call__ -> forward.
        Y_predict = model(X)
        # Loss between predictions and observed values (MSE is symmetric).
        loss = lossfunc(Y, Y_predict)

        # Reset accumulated gradients before backpropagation.
        optimizer.zero_grad()
        # Backpropagate the loss.
        loss.backward()
        # Update the model parameters (W and B).
        optimizer.step()

        plt.cla()
        plt.xlim(-2, 12)
        plt.ylim(-2, 40)
        plt.plot(x, y_true, c='b')
        plt.scatter(x, y, c='g')
        # Fix: the original called next(model.parameters()) twice on fresh
        # generators, printing the SAME first parameter twice. Iterate once
        # to print every parameter (weight and bias).
        for p in model.parameters():
            print(p)
        plt.plot(X.reshape(-1).detach(), Y_predict.reshape(-1).detach(), c='r')
        plt.text(0, 0, f'loss : {loss.item():.4f}')
        plt.pause(1)

    plt.show()


if __name__ == '__main__':
    # Script entry point: run the torch training demo.
    test6()
