import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from fastai.tabular import *

from losses import *


def get_model(in_freatures, out_features, n_layers=1, model_state=None):
    """Build a small MLP: Linear(in, 10) + LeakyReLU, then `n_layers`
    hidden Linear(10, 10) + LeakyReLU blocks, then Linear(10, out_features).

    Parameters
    ----------
    in_freatures : int
        Number of input features. (Name kept as-is — misspelled — for
        backward compatibility with keyword callers.)
    out_features : int
        Size of the model output (embedding / logits dimension).
    n_layers : int
        Number of additional hidden Linear+LeakyReLU blocks.
    model_state : dict, optional
        If given, loaded into the freshly built model via `load_state_dict`,
        so a saved model can be restored.

    Returns
    -------
    nn.Sequential
    """
    layers = [nn.Linear(in_freatures, 10), nn.LeakyReLU()]
    for _ in range(n_layers):
        layers.extend([nn.Linear(10, 10), nn.LeakyReLU()])
    layers.append(nn.Linear(10, out_features))
    model = nn.Sequential(*layers)
    if model_state is not None:
        model.load_state_dict(model_state)
    return model


def get_ds(ds_name, fold, root, bs=64):
    """Load fold `fold` of dataset `ds_name` from `root/ds_name/`.

    Exactly one file must match each of the glob patterns
    ``{ds_name}_train*_{fold}.csv`` / ``{ds_name}_test*_{fold}.csv``
    (AssertionError otherwise). The CSVs are headerless; the last column
    is expected by callers to hold the class label.

    Parameters
    ----------
    ds_name : str
        Dataset directory / file-name prefix.
    fold : int
        Cross-validation fold index.
    root : str or Path
        Directory containing the per-dataset sub-directories.
    bs : int
        Unused; kept only for backward compatibility of the signature.

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        Float tensors for the train and test split, one row per sample.
    """
    data_dir = Path(root) / ds_name

    def _load(split):
        # Exactly one file per (dataset, split, fold) is expected on disk.
        matches = list(data_dir.glob(f'{ds_name}_{split}*_{fold}.csv'))
        assert len(matches) == 1
        return torch.Tensor(pd.read_csv(matches[0], header=None).values)

    return _load('train'), _load('test')

def get_dl(train_set, bs=64):
    """Wrap `train_set` in a non-shuffling DataLoader with batch size `bs`."""
    loader = DataLoader(train_set, batch_size=bs)
    return loader

def evaluate(model,loss_func,ds):
    """Return (loss, accuracy) of `model` on the labelled tensor `ds`.

    `ds` is a 2-D tensor: all columns except the last are the input
    features; the last column holds integer class labels.

    NOTE(review): `loss_func` is assumed to be one of the modules from
    losses.py with a dual calling convention — `loss_func(embeddings, y)`
    returns the scalar loss, while `loss_func(embeddings)` (labels omitted)
    returns per-class logits. The accuracy below is computed on those
    logits, not on the raw model output — confirm against losses.py.
    """
    # Split features / labels; labels cast to long for the loss.
    tx,ty=ds[:,:-1],ds[:,-1].long()
    #tx,ty=tx,ty
    ex = model(tx)
    loss = loss_func(ex,ty)

    return loss,accuracy(loss_func(ex),ty)


def train(model, loss_func, train_dl, test_ds, lr=1e-3, epochs=1000, verbose=50):
    """Train `model` (and the parameters of `loss_func`) with Adam.

    Parameters
    ----------
    model : nn.Module
        Maps features to embeddings; trained in place.
    loss_func : nn.Module
        Loss module (see `evaluate`); may hold trainable parameters
        (e.g. margin-loss class weights), hence it is optimized jointly.
    train_dl : DataLoader
        Batches of rows with the label in the last column.
    test_ds : torch.Tensor
        Full held-out set evaluated every `verbose`-th epoch.
    lr, epochs, verbose : hyper-parameters; `verbose` is the epoch period
        of test-set evaluation.

    Returns
    -------
    (train_recorder, recorder)
        Per-batch [loss, acc] pairs, and per-evaluation [test_loss,
        test_acc] pairs.
    """
    # Wrap model + loss in one module so a single optimizer / train-eval
    # toggle covers both parameter sets.
    arch = nn.Sequential(model, loss_func)
    opt = optim.Adam(arch.parameters(), lr,)
    arch.train()
    recorder = []
    train_recorder = []
    for epoch in range(epochs):

        for block in train_dl:
            loss, acc = evaluate(model, loss_func, block)
            opt.zero_grad()
            loss.backward()
            opt.step()
            train_recorder.append([loss.item(), acc.item()])
            # NOTE(review): this check sits inside the batch loop, so on
            # every `verbose`-th epoch the test set is evaluated once per
            # *batch* and `recorder` gains len(train_dl) entries — callers
            # (experiment) rely on recorder[-1], so this is kept as-is.
            if (epoch + 1) % verbose == 0:
                arch.eval()
                with torch.no_grad():
                    test_loss, test_acc = evaluate(model, loss_func, test_ds)
                # print(f"train {loss:.1f},{acc:.3f};test {test_loss:.1f},{test_acc:.3f}")
                arch.train()

                recorder.append([test_loss.item(), test_acc.item()])

    #     opt.zero_grad()
    return train_recorder, recorder

def experiment(args, ds_name, exps=3):
    """Run `exps` repetitions of 10-fold training on dataset `ds_name`.

    For each fold, trains a fresh model with the loss named by `args.loss`,
    writes the test-evaluation history to
    ``exps/{ds_name}/{args.loss}_{exp}_{fold}.csv``, and finally prints the
    mean of the last recorded [test_loss, test_acc] over the folds computed
    in this run.

    Parameters
    ----------
    args : namespace-like
        Must carry `loss` plus whatever hyper-parameters `get_loss` reads
        (e.g. `s`, `m`).
    ds_name : str
        Dataset directory name under ``data/DataSet``.
    exps : int
        Number of repetitions.
    """
    for exp in range(exps):
        ans = []
        for i in range(10):
            file = Path(f'exps/{ds_name}/{args.loss}_{exp}_{i}.csv')
            if file.exists():
                # Fold already computed in a previous run; note it is NOT
                # re-read, so `ans` only aggregates freshly computed folds.
                continue
            train_ds, test_ds = get_ds(ds_name, i, root='data/DataSet')
            features = train_ds.size(1) - 1      # last column is the label
            n_classes = int(train_ds[:, -1].max().item()) + 1
            hidden = 10                          # embedding size fed to the loss

            loss_func = get_loss(args.loss, hidden, n_classes, args)
            model = get_model(features, hidden)
            train_recorder, recorder = train(model, loss_func, get_dl(train_ds, 135), test_ds)
            df = pd.DataFrame(recorder, columns=['loss', 'accuracy'])

            # BUG FIX: the original `(file/'..').mkdir(parents=True, ...)`
            # made pathlib create the missing "parent" of 'x.csv/..' — i.e. a
            # *directory* named x.csv — which then broke df.to_csv(file).
            file.parent.mkdir(parents=True, exist_ok=True)
            df.to_csv(file)
            ans.append(recorder[-1])

        if ans:
            ans = np.array(ans)
            # Mean over folds of the final [test_loss, test_acc] pair.
            print(args.loss, ds_name, ans.mean(0))


import easydict

if __name__ == '__main__':
    # Shared hyper-parameters for every run: `s` and `m` are the scale and
    # margin read by get_loss for the margin-based losses. They are
    # constant across datasets/losses, so they are set once up front.
    args = easydict.EasyDict()
    args.s = 10
    args.m = 0.3

    datasets = ['AustralianCreditApproval',
                'BalanceScale',
                'BanknoteAuthentication',
                'BreastCancerWisconsinPrognostic',
                'BreastTissue',
                'CarEvaluation',
                'ClimateModelSimulationCrashes',
                'cmc',
                'CongressionalVotingRecords',
                'ForestTypeMapping',
                'haberman',
                # 'Hayes-Roth',
                # 'ionosphere',
                'iris',
                'Parkinsons',
                'PimaIndiansDiabetes',
                'PlanningRelax',
                'QualitativeBankruptcy',
                'risk_factors_cervical_cancer',
                'Seeds',
                # 'segment',
                'SPECT-heart',
                'TeachingAssistantEvaluation',
                'UserKnowledgeModeling',
                'VertebralColumn',
                # 'wbcd',
                'WebsitePhishing',
                'WholesaleCustomers',
                'wine',
                # 'zoo'
                ]

    for ds in datasets:
        for loss in ['arc1', 'arc2', 'soft']:
            args.loss = loss
            experiment(args, ds, 1)


