# Titanic survival prediction
# 12 attributes, 892 samples in total
# passenger id, survived, cabin class, name, sex, age, siblings/spouses count, parents/children count, ticket, fare
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch.utils.data import DataLoader


# Step 1: prepare the dataset
class TitanicDataset(Dataset):
    """Titanic survival dataset loaded from a CSV file.

    Each item is a ``(features, label)`` pair: a float32 tensor of the five
    numeric feature columns and the 0/1 ``Survived`` target.
    """

    def __init__(self, filepath):
        # Numeric feature columns used as model input.
        features = ["Pclass", "Age", "SibSp", "Parch", "Fare"]
        # pandas reads the first CSV row as the column header by default.
        data = pd.read_csv(filepath)
        # Fill missing values per feature column with that column's own mean.
        # (The previous code filled every NaN in the whole frame with the Age
        # mean, which would write an age value into any other missing cell,
        # e.g. a missing Fare.)
        data[features] = data[features].fillna(data[features].mean())
        self.len = data.shape[0]
        # Convert the selected columns to tensors; float32 matches what the
        # model's Linear layers expect.
        self.x_data = torch.from_numpy(data[features].to_numpy(dtype=np.float32))
        self.y_data = torch.from_numpy(np.array(data["Survived"]))

    def __getitem__(self, index):
        # Return one (features, label) sample.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len






# class TiTanicDataset(Dataset):
#     def __init__(self,filepath):
#         xy = np.loadtxt(filepath,delimiter=',',dtype=np.float32)
#         self.len = xy.shape[0]
#         # take the second column: the target y (survival) lives there
#         self.y_data = torch.from_numpy(xy[:,[1]])
#         # take the first column
#         self.x_data1 = torch.from_numpy(xy[:, 0:1])
#         # take the third column through the last column
#         self.x_data2 = torch.from_numpy(xy[:,2:])
#         # concatenate along rows
#         self.x_data = np.concatenate(self.x_data1,self.x_data2)
#
#     def __getitem__(self,index):
#         return self.x_data[index],self.y_data[index]
#
#     def __len__(self):
#         return self.len
# Build the training dataset and a shuffled mini-batch loader over it.
dataset = TitanicDataset("dataset/train.csv")
train_loader = DataLoader(dataset=dataset,batch_size=32,shuffle=True)

class Model(torch.nn.Module):
    """Three-layer fully-connected binary classifier (5 -> 4 -> 3 -> 1).

    A sigmoid follows every layer; the final sigmoid yields a survival
    probability in [0, 1], suitable for BCELoss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear2 = torch.nn.Linear(5, 4)
        self.linear3 = torch.nn.Linear(4, 3)
        self.linear4 = torch.nn.Linear(3, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Forward pass: (N, 5) float tensor -> (N, 1) probabilities."""
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        x = self.sigmoid(self.linear4(x))
        return x

    def predict(self, x):
        """Inference helper for the test set: forward pass only, no autograd.

        Thresholds each output probability at 0.5 and returns a plain list of
        0/1 predictions, one per row of ``x``. Reuses ``forward`` instead of
        duplicating the layer stack (the old copy could silently drift out of
        sync if the architecture changed).
        """
        with torch.no_grad():
            probs = self.forward(x)
            return [1 if p > 0.5 else 0 for p in probs]
# Step 2: model, loss, and optimizer.
model = Model()
# Binary cross-entropy on the sigmoid output; 'mean' averages over the batch.
criterion = torch.nn.BCELoss(reduction='mean')
# Plain SGD over all model parameters.
optimizer = torch.optim.SGD(model.parameters(),lr=0.01)
if __name__ == '__main__':
    # Step 3: training loop — 100 epochs of mini-batch SGD.
    for epoch in range(100):
        for batch_idx, (inputs, labels) in enumerate(train_loader):
            # Cast to float32; reshape labels to (N, 1) to match the output.
            inputs = inputs.float()
            labels = labels.float().view(-1, 1)

            # Forward pass and batch loss.
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, batch_idx, loss.item())

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

#torch.save(model,"myModel")
# Step 4: run inference on the test set and export a submission CSV.
test_data = pd.read_csv('dataset/test.csv')
features = ["Pclass", "Age", "SibSp", "Parch", "Fare"]
# Fill missing values per feature column with that column's own mean.
# (The previous code filled every NaN in the frame with the Age mean, which
# wrote an age value into the test set's missing Fare cell.)
test_data[features] = test_data[features].fillna(test_data[features].mean())
test = torch.from_numpy(np.array(test_data[features]))


y = model.predict(test.float())
out = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y})
print(out['Survived'].sum())  # number of predicted survivors
out.to_csv('predict/my_first_predict.csv', index=False)  # saved into the predict/ folder