import numpy as np
from matplotlib import pyplot as  plt
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
 
import torch
from torch import nn
from torch.nn import functional as F
 
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# Synthesize a Boston-housing-sized regression problem: 506 samples, 13 features.
X, y = make_regression(n_samples=506, n_features=13)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1
)

# Targets become column vectors so they match the model's (batch, 1) output.
y_train = y_train.reshape((-1, 1))
y_test = y_test.reshape((-1, 1))

# Standardize features using statistics taken from the training split only,
# so no information from the test split leaks into preprocessing.
mean_ = X_train.mean(axis=0)
std_ = X_train.std(axis=0)
print(mean_.shape)
print(X_test.shape)
X_train = (X_train - mean_) / std_
X_test = (X_test - mean_) / std_
 
class MyDataset(Dataset):
    """Wrap paired feature/target arrays as an indexable torch ``Dataset``.

    Each item is returned as a ``(features, target)`` pair of float32 tensors.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx]).float()
        target = torch.tensor(self.y[idx]).float()
        return features, target

    def __len__(self):
        return len(self.X)
# Mini-batch loaders. Training batches are shuffled and partial batches are
# dropped; the test loader keeps its final partial batch.
train_dataset = MyDataset(X=X_train, y=y_train)
train_dataloader = DataLoader(
    dataset=train_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=True,
)

test_dataset = MyDataset(X=X_test, y=y_test)
test_dataloader = DataLoader(
    dataset=test_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=False,
)
class LinearRegression(nn.Module):
    """Two-layer regression network: Linear -> ReLU -> Linear.

    Despite the name, the ReLU in between makes this a small non-linear MLP
    rather than a pure linear regression.

    Args:
        in_features: number of input features per sample.
        out_features: size of the model output (1 for scalar regression).
        hidden_features: width of the hidden layer. Defaults to 1, which
            matches the original hard-coded bottleneck, but can be widened
            for more capacity without changing existing callers.
    """

    def __init__(self, in_features, out_features, hidden_features=1):
        # A subclass must initialize its nn.Module parent before any
        # sub-modules can be registered.
        super().__init__()
        self.linear1 = nn.Linear(in_features=in_features, out_features=hidden_features)
        self.linear2 = nn.Linear(in_features=hidden_features, out_features=out_features)

    def forward(self, x):
        h1 = self.linear1(x)
        h2 = F.relu(h1)
        return self.linear2(h2)
# Build the model for 13 standardized features and a scalar target.
lr = LinearRegression(in_features=13, out_features=1)
print(lr.parameters())

# Mean-squared-error objective optimized with plain SGD.
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(params=lr.parameters(), lr=1e-3)
 
def get_loss(dataloader=train_dataloader, model=lr, loss_fn=loss_fn):
    """Return the average per-sample loss of ``model`` over ``dataloader``.

    Per-batch losses are weighted by batch size so that a final partial batch
    (the test loader uses ``drop_last=False``) does not skew the average —
    the original averaged the per-batch means equally. ``.item()`` detaches
    each scalar so no autograd graph or tensors are retained across batches.

    Returns:
        float: mean loss per sample; 0.0 for an empty dataloader (the
        original raised ZeroDivisionError in that case).
    """
    with torch.no_grad():
        total_loss = 0.0
        total_samples = 0
        for X, y in dataloader:
            batch_size = len(X)
            total_loss += loss_fn(model(X), y).item() * batch_size
            total_samples += batch_size
        return total_loss / total_samples if total_samples else 0.0
 
 
def train(model=lr, dataloader=train_dataloader, optimizer=optimizer, loss_fn=loss_fn, epochs=20):
    """Train ``model`` for ``epochs`` passes over ``dataloader``.

    Bug fix: the inner loop previously iterated the module-level
    ``train_dataloader`` directly, silently ignoring the ``dataloader``
    argument; the per-epoch training-loss report had the same problem.

    Args:
        model: the nn.Module to optimize.
        dataloader: batches of (X, y) pairs to train on.
        optimizer: optimizer bound to ``model``'s parameters.
        loss_fn: loss taking (predictions, targets).
        epochs: number of full passes over the data.
    """
    for epoch in range(1, epochs + 1):
        for X, y in dataloader:
            loss = loss_fn(model(X), y)
            loss.backward()
            optimizer.step()
            # Clear gradients so the next batch starts from zero.
            optimizer.zero_grad()
        print(f"当前是第{epoch}轮，训练集的误差为：{get_loss(dataloader=dataloader)},测试集的误差为：{get_loss(dataloader=test_dataloader)}")
 
# Fit the model with the default training configuration.
train()
# Default cut-off for binarizing predictions (note: name kept for
# backward compatibility despite the "threshhold" typo).
threshhold = 0.05


def predict(model, X, threshold=threshhold):
    """Binarize model outputs: 1.0 where prediction > ``threshold``, else 0.0.

    Generalized so the cut-off is a parameter instead of only the hard-coded
    module-level constant; the default preserves the original behavior.

    Args:
        model: callable mapping an input batch to a prediction tensor.
        X: input tensor batch.
        threshold: strict lower cut-off for the positive class
            (defaults to ``threshhold`` = 0.05).

    Returns:
        Float tensor of 0.0/1.0 labels, same shape as ``model(X)``.
    """
    with torch.no_grad():
        y_pred = model(X)
        label = (y_pred > threshold).float()
    return label
# Report thresholded predictions for every test batch (targets unused here).
for features, _ in test_dataloader:
    print(predict(lr, features))
 
