import pandas as pd
import torch
import numpy as np
from torch import nn
from d2l import torch as d2l
from sklearn.preprocessing import StandardScaler,MinMaxScaler
#NOTE: this code was changed many times during experimentation; it is not the best-result version
#Data preprocessing
def load_data():
    """Load the train/test CSVs, scale the features, and return tensors.

    Returns:
        train_features: float32 tensor (n_train, n_features)
        test_features:  float32 tensor (n_test, n_features)
        train_labels_:  float32 tensor (n_train, 7), one row per sample over
                        the 7 defect-label columns
        labels:         list of the 7 defect-label column names
    """
    train_data = pd.read_csv('FinalProject\\data\\train.csv')
    test_data = pd.read_csv('FinalProject\\data\\test.csv')
    # Drop the id column (col 0) and, for train, the trailing 7 label columns;
    # stack train+test so the scalers are fit on the combined distribution.
    # NOTE(review): fitting scalers on train+test together leaks test-set
    # statistics into preprocessing — common in Kaggle workflows, but confirm
    # this is intended.
    all_features = pd.concat((train_data.iloc[:,1:-7],test_data.iloc[:,1:]))
    #rabbage_labels = ['Pixels_Areas','X_Perimeter','Y_Perimeter','Sum_of_Luminosity','Outside_X_Index','Edges_Y_Index']
    #all_features = all_features.drop(columns=rabbage_labels)
    # Standardize then min-max scale every column EXCEPT positions 11:13
    # (presumably the two binary TypeOfSteel indicator columns — TODO confirm).
    std_scaler = StandardScaler()
    std_features = std_scaler.fit_transform(pd.concat((all_features.iloc[:,:11],all_features.iloc[:,13:]),axis=1))
    mm_scaler = MinMaxScaler()
    mm_features = mm_scaler.fit_transform(std_features)
    # Re-insert the two untouched columns back at their original positions 11:13.
    all_features = np.concatenate((mm_features[:,:11],all_features.iloc[:,11:13].values,mm_features[:,11:]),axis=1)
    train_cnt = train_data.shape[0]

    # Split the combined matrix back into train/test by row count.
    train_features = torch.tensor(all_features[:train_cnt], dtype=torch.float32)
    test_features = torch.tensor(all_features[train_cnt:],dtype=torch.float32)
    labels = ['Pastry','Z_Scratch','K_Scatch','Stains','Dirtiness','Bumps','Other_Faults']
    train_labels = train_data[labels]
    # 8-class variant (maps all-zero label rows to an extra "no defect" class
    # index 7) — kept commented out for reference:
    #all_zeros_idx = (train_labels == 0).all(axis=1)
    #train_labels_ = torch.argmax(torch.tensor(train_labels.values,dtype=torch.float32),dim=1)
    train_labels_ = torch.tensor(train_labels.values,dtype=torch.float32)
    #train_labels_[all_zeros_idx] = 7
    return train_features,test_features,train_labels_,labels

# Load the data once at module level and print shapes as a sanity check.
train_features, test_features, train_labels, labels = load_data()
print('训练特征',train_features.shape)  # training features
print(train_features)
print('测试特征',test_features.shape)  # test features
print(test_features)
print('训练标签',train_labels.shape)  # training labels
print(train_labels)
# Network dimensions: input width from the data, 7 defect-label outputs.
in_features_cnt = train_features.shape[1]
out_labels_cnt = 7

# Submission template whose label columns get overwritten with predictions.
sample_sub = pd.read_csv('FinalProject\\data\\sample_submission.csv')

# 4-layer MLP: in_features -> 256 -> 128 -> 64 -> 7 label scores (raw logits).
net = nn.Sequential(nn.Linear(in_features_cnt,256),
                    nn.ReLU(),
                    nn.Linear(256,128),
                    nn.ReLU(),
                    nn.Linear(128,64),
                    nn.ReLU(),
                    nn.Linear(64,out_labels_cnt))

# Training hyperparameters.
num_epochs = 1000
lr = 0.0005
weight_decay = 1e-4
batch_size = 128

# NOTE(review): MSE on raw logits against 0/1 label rows is unusual for
# multi-label classification — nn.BCEWithLogitsLoss() is the standard choice.
# Kept as-is to preserve the experiment's behavior; confirm before changing.
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(),lr=lr,weight_decay=weight_decay)
# Fall back to CPU when CUDA is unavailable; the original hard-coded 'cuda:0'
# made net.to(device) crash on CPU-only machines.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

#Returns the number of correct predictions in a batch.
def acc_cnt(y_hat,y):
    """Count predictions whose argmax matches the label.

    Args:
        y_hat: (N, C) raw scores/logits.
        y: either (N,) integer class indices or (N, C) one-hot rows.

    Returns:
        Number of correct predictions as a float.
    """
    max_prob_idx_y_hat = torch.argmax(y_hat,dim=1)
    # Bug fix: train_labels are stored one-hot (N, C); comparing them directly
    # against the (N,) argmax vector mis-broadcasts (or raises). Reduce one-hot
    # labels to class indices first; plain index labels pass through unchanged.
    if y.dim() > 1:
        y = torch.argmax(y,dim=1)
    cmp = y == max_prob_idx_y_hat
    return float(cmp.sum())

def train(net,num_epochs,device):
    """Train `net` on the module-level train_features/train_labels.

    Uses the module-level `optimizer`, `loss_func`, and `batch_size`; plots
    the running train loss with d2l's Animator and prints final loss and an
    approximate throughput.

    Args:
        net: the model to train (moved to `device` in place).
        num_epochs: number of passes over the training data.
        device: torch.device to train on.
    """
    def init_weights(m):
        # Xavier-normal init for every linear layer.
        if type(m) == nn.Linear:
            nn.init.xavier_normal_(m.weight)
    net.apply(init_weights)
    print("train on",device)
    net.to(device)
    animator = d2l.Animator(xlabel='epoch',xlim=[1,num_epochs],
                            legend=['train_loss','train_acc'])
    train_iter = d2l.load_array((train_features,train_labels),batch_size)
    timer,num_batches = d2l.Timer(),len(train_iter)
    # Bug fix: num_batches//5 is 0 when there are fewer than 5 batches, which
    # made the modulo below raise ZeroDivisionError. Plot at least every batch.
    plot_every = max(num_batches // 5, 1)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(2)  # (summed loss, example count)
        net.train()
        for i,(X,y) in enumerate(train_iter):
            timer.start()
            optimizer.zero_grad()
            X,y = X.to(device),y.to(device)
            y_hat = net(X)
            l = loss_func(y_hat,y)
            l.backward()
            optimizer.step()
            # float(l) detaches the scalar so the accumulator holds plain
            # numbers rather than device tensors.
            metric.add(float(l)*X.shape[0],y.shape[0])
            timer.stop()
            train_l = metric[0]/metric[1]
            if (i+1)%plot_every == 0 or i==num_batches - 1:
                animator.add(epoch+(i+1)/num_batches,(train_l,None))
    print("loss:",train_l)
    # Throughput estimate: examples in the last epoch times epoch count over
    # total measured time.
    print(metric[1]*num_epochs/timer.sum(),"examples/s on",device)
    d2l.plt.show()

def train_and_predict():
    """Train the model, predict on the test set, and write the submission CSV.

    Overwrites the 7 label columns of the module-level `sample_sub` with the
    network's raw output scores and saves it to
    FinalProject\\data\\submission.csv.
    """
    train(net,num_epochs,device)
    net.to(torch.device('cpu'))
    # Inference hygiene: disable training-mode behavior and gradient tracking
    # (the original built a grad graph for predictions it never backprops).
    net.eval()
    with torch.no_grad():
        preds = net(test_features)
    #preds = torch.softmax(preds,dim=1)
    # NOTE(review): raw linear outputs are written as the label scores; apply
    # softmax/sigmoid above if calibrated probabilities are required.
    preds = pd.DataFrame(preds.numpy())
    sample_sub[labels] = preds
    sample_sub.to_csv('FinalProject\\data\\submission.csv',index=False)

# Script entry point: runs the full train + predict pipeline on import.
train_and_predict()



