import os
import numpy as np
import h5py
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from y import Y
import logging
logging.captureWarnings(True)
logging.disable(logging.CRITICAL)



# from pytorch ignite
def r2_score_compute_fn(y_pred, y_truth):
    """Return the R^2 (coefficient of determination) of predictions.

    Args:
        y_pred: predicted values (torch tensor).
        y_truth: ground-truth values (torch tensor, same shape as y_pred).

    Returns:
        float: 1 - SS_res / SS_tot.

    NOTE: if y_truth is constant the denominator is zero and the result is
    nan/inf — unchanged from the original behavior.
    """
    # fix: re-indented to the file's 4-space convention (was 2-space)
    ss_res = torch.sum((y_truth - y_pred) ** 2)
    ss_tot = torch.sum((y_truth - y_truth.mean()) ** 2)
    return 1 - (ss_res / ss_tot).item()


# Mask over per-product feature channels: 1 marks channels expressed in price
# units (the only ones that receive the additive noise in
# H5Dataset.__getitem__), 0 marks all others. Length 27 — presumably matches
# the width of the x[27:] slice it is multiplied against; TODO confirm.
is_price_unit = np.array([1,1,1,0,0,0,0,0,0,1,1,1,1,1,0,1,0,1,0,1,0,1,0,1,0,0,0], dtype=np.float32)

class H5Dataset(torch.utils.data.Dataset):
    """Dataset backed by an HDF5 file whose 'x' and 'y' arrays are loaded
    fully into memory as float32. Targets are reduced to the single column
    selected by the module-level ``Y`` and standardized with the module-level
    ``y_mean``/``y_std`` on access.
    """

    def __init__(self, h5_path, augment=False):
        self.h5_path = h5_path
        self.h5_file = None  # should be opened in each process
        self.x = None
        self.y = None
        self.length = None
        self.cin = None
        self.cout = None
        self.augment = augment

        self.init()

    def init(self):
        """Open the HDF5 file and materialize x/y, caching the dimensions."""
        self.h5_file = h5py.File(self.h5_path, 'r')
        self.x = self.h5_file['x'][...].astype(np.float32)
        self.y = self.h5_file['y'][...].astype(np.float32)

        # keep only the target column Y, as shape [N, 1]
        self.y = np.expand_dims(self.y[:, Y], 1)

        self.length = len(self.x)
        self.cin = self.x.shape[1]
        self.cout = self.y.shape[1]

    def __getitem__(self, index):
        features = self.x[index].copy()  # multi product features
        # standardize the target with the training-set statistics
        target = (self.y[index] - y_mean) / y_std

        if self.augment:
            # one shared random draw, scaled by x[27], applied only to the
            # price-unit channels of the second product's feature slice
            amplitude = features[27] / 3.0
            features[27:] += np.random.random() * amplitude * is_price_unit

        return (features, target)

    def __len__(self):
        return self.length


class Net(pl.LightningModule):
    """MLP regressor: input -> Linear(cin, 60) -> 4x [Linear(60, 60) + SELU]
    -> Linear(60, cout), trained with SGD + MSE on standardized targets.

    Normalization statistics (module-level x_mean/x_std/y_mean/y_std) are
    registered as buffers so they travel with checkpoints and traced models.
    NOTE(review): uses the old pytorch_lightning dict-return API ('log' key,
    validation_epoch_end/test_epoch_end) — confirm against the installed
    pytorch_lightning version.
    """

    def __init__(self):
        super().__init__()
        # test.h5 is read here only to discover cin/cout (channel counts)
        df = H5Dataset('out/feature/test.h5')
        # self.a = torch.nn.Linear(df.cin, df.cout)
        self.register_buffer('x_mean', torch.Tensor(x_mean))
        self.register_buffer('x_std', torch.Tensor(x_std))
        self.register_buffer('y_mean', torch.Tensor(y_mean))
        self.register_buffer('y_std', torch.Tensor(y_std))
        #                                                   0 1 2            9 10 111213 15  17  19  21  23
        self.register_buffer('is_price_unit', torch.FloatTensor(is_price_unit))
        self.first = nn.Linear(df.cin, 60)
        self.mid = nn.ModuleList()
        for i in range(4):
            self.mid.append(nn.Linear(60, 60))
        self.last = nn.Linear(60, df.cout)

    def forward(self, x):
        # standardize inputs with the training-set statistics
        x = (x - self.x_mean.unsqueeze(0)) / self.x_std.unsqueeze(0)
        x = self.first(x)
        # NOTE(review): self.first's output feeds self.mid[0] with no
        # activation in between, so those two linears compose into a single
        # linear map — confirm this is intended.
        for l in self.mid:
            x = F.selu(l(x))
        x = self.last(x)
        return x

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self(x)

        # mean MSE over the batch (targets are already standardized)
        loss = F.mse_loss(input=y_pred, target=y)
        tensorboard_logs = {'train_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def configure_optimizers(self):
        opt = torch.optim.SGD(self.parameters(), lr=0.001)
        # NOTE(review): ReduceLROnPlateau is returned without an explicit
        # monitor; relies on the PL version's default metric — confirm.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt)
        return [opt], [scheduler]

    def train_dataloader(self):
        # augment=False: noise augmentation is currently disabled for training
        df = H5Dataset('out/feature/train.h5', augment=False)
        loader = torch.utils.data.DataLoader(df, batch_size=256, num_workers=0, shuffle=True, drop_last=False)
        return loader

    def validation_step(self, batch, batch_idx):
        x, y = batch
        # y = y[:,Y]
        # y_pred = self(x)[:,Y]
        y_pred = self(x)
        # sum-reduced loss so the epoch-end hook can average over all samples
        return {'val_loss': F.mse_loss(input=y_pred, target=y, reduction='sum'), 'y_truth': y, 'y_pred': y_pred}

    def validation_epoch_end(self, outputs):
        # total summed loss divided by sample count => per-sample MSE
        # (per-element only because cout == 1 here)
        total_loss = sum([x['val_loss'] for x in outputs])

        y_truth = torch.cat([x['y_truth'] for x in outputs], dim=0)
        y_pred = torch.cat([x['y_pred'] for x in outputs], dim=0)
        avg_loss = total_loss / len(y_pred)

        r2score = r2_score_compute_fn(y_pred=y_pred, y_truth=y_truth)

        tensorboard_logs = {'val_loss': avg_loss, 'r2score': r2score}
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def val_dataloader(self):
        df = H5Dataset('out/feature/val.h5')
        loader = torch.utils.data.DataLoader(df, batch_size=256, num_workers=0)
        return loader

    def test_step(self, batch, batch_idx):
        x, y = batch
        # y = y[:,Y]
        # y_pred = self(x)[:,Y]
        y_pred = self(x)
        return {'test_loss': F.mse_loss(input=y_pred, target=y, reduction='sum'), 'y_truth': y, 'y_pred': y_pred}

    def test_epoch_end(self, outputs):
        total_loss = sum([x['test_loss'] for x in outputs])

        y_truth = torch.cat([x['y_truth'] for x in outputs], dim=0)
        y_pred = torch.cat([x['y_pred'] for x in outputs], dim=0)
        avg_loss = total_loss / len(y_pred)

        r2score = r2_score_compute_fn(y_pred=y_pred, y_truth=y_truth)

        tensorboard_logs = {'test_loss': avg_loss, 'r2score': r2score}
        # stash final metrics so the script can print them after trainer.test()
        self.last_test_l2 = avg_loss.item()
        self.last_test_r2 = r2score
        return {'test_loss': avg_loss, 'log': tensorboard_logs}

    def test_dataloader(self):
        df = H5Dataset('out/feature/test.h5')
        loader = torch.utils.data.DataLoader(df, batch_size=256, num_workers=0)
        return loader


def get_statistics():
    """Compute per-channel mean/std of features and targets over the train set.

    Returns:
        (x_mean, x_std, y_mean, y_std): float32 numpy arrays of shape [C].
        Any zero standard deviation is replaced with 1.0 so downstream
        standardization never divides by zero.
    """
    df = H5Dataset('out/feature/train.h5')
    x_mean = df.x.mean(axis=0)  # [N,C] => [C]
    x_std = df.x.std(axis=0)  # [C]
    y_mean = df.y.mean(axis=0)  # [N,C] => [C]
    y_std = df.y.std(axis=0)  # [C], 0 to 1 to avoid divide by zero
    # fix: guard x_std as well — a constant feature channel previously left a
    # zero in x_std, making (x - x_mean) / x_std inf/nan inside Net.forward
    x_std[x_std == 0] = 1.0
    y_std[y_std == 0] = 1.0
    return x_mean, x_std, y_mean, y_std

# Compute normalization statistics first: H5Dataset.__getitem__ and the Net
# buffers read these module-level globals.
x_mean, x_std, y_mean, y_std = get_statistics()
model = Net()

# checkpoint_callback = ModelCheckpoint(
#     filepath='out/models/',
#     save_top_k=True,
#     verbose=True,
#     monitor='val_loss',
#     mode='min',
#     prefix=''
# )

# trainer = pl.Trainer(max_epochs=100, checkpoint_callback=checkpoint_callback)
# NOTE(review): show_progress_bar / progress_bar_callback /
# progress_bar_refresh_rate are kwargs from an old pytorch_lightning release —
# confirm against the installed version.
trainer = pl.Trainer(max_epochs=100, show_progress_bar=False, progress_bar_callback=False, progress_bar_refresh_rate=0)
trainer.fit(model)
trainer.test()

# attributes stored by Net.test_epoch_end
print('r2', model.last_test_r2)
print('l2', model.last_test_l2)

# save to c++ model
def export():
    """Trace the trained model on one test batch and save it as TorchScript
    (loadable from C++ via torch::jit::load)."""
    batch_x, _ = next(iter(model.test_dataloader()))  # one example batch
    traced_model = torch.jit.trace(model, batch_x)
    os.makedirs('out/models', exist_ok=True)
    traced_model.save(f"out/models/y_{Y}.pt")

export()
