import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from torch import nn
import os

from model import Model

T_STEP = 128  # window length (time steps) of each training sample
N_FEAT = 5    # features per step: Open, High, Low, Close, Volume (log-changes)
BATCH = 64    # mini-batch size

def read_folder(dirname, columns=("Open", "High", "Low", "Close", "Volume"),
                min_rows=None):
    """Load OHLCV CSVs from *dirname* and convert each to per-step log-changes.

    For each file: rows with any non-positive value are dropped (log is
    undefined there), then each price column is expressed as the log-change
    relative to the previous row's Close, and Volume as the log-change
    relative to the previous row's Volume.

    Parameters
    ----------
    dirname : str
        Directory containing CSV files.
    columns : sequence of str
        Column names to read, in (open, high, low, close, volume) order.
    min_rows : int or None
        Keep only series strictly longer than this; defaults to T_STEP.

    Returns
    -------
    list of float32 arrays, each of shape (length, len(columns)).
    """
    if min_rows is None:
        min_rows = T_STEP  # module-level default window length
    try:
        from tqdm import tqdm
    except ImportError:  # tqdm is only a progress bar; degrade to a no-op
        def tqdm(it):
            return it
    X_data = []
    for filename in tqdm(os.listdir(dirname)):
        try:
            df = pd.read_csv(os.path.join(dirname, filename))
        except pd.errors.EmptyDataError:
            continue  # some downloaded files are empty; skip them
        x = np.float32(df[list(columns)])
        # keep only all-positive rows, then move to log space
        x = np.log(x[np.all(x > 0, 1)])
        # base = previous row with all four price columns replaced by its
        # Close, so prices become log-returns vs. the previous close
        base = x[:-1].copy()
        base[..., :4] = base[..., 3:4]
        x = x[1:] - base
        if len(x) > min_rows:
            X_data.append(x)
    return X_data

def read_A(dirname, min_rows=None):
    """Load the "A" dataset (lowercase ohlcv column headers) from *dirname*.

    Same transformation as read_folder: drop non-positive rows, take logs,
    express prices as log-returns vs. the previous close and volume as the
    log-change vs. the previous volume.

    Parameters
    ----------
    dirname : str
        Directory containing CSV files with lowercase column names.
    min_rows : int or None
        Keep only series strictly longer than this; defaults to T_STEP.

    Returns
    -------
    list of float32 arrays, each of shape (length, 5).
    """
    if min_rows is None:
        min_rows = T_STEP  # module-level default window length
    try:
        from tqdm import tqdm
    except ImportError:  # tqdm is only a progress bar; degrade to a no-op
        def tqdm(it):
            return it
    X_data = []
    for filename in tqdm(os.listdir(dirname)):
        try:
            df = pd.read_csv(os.path.join(dirname, filename))
        except pd.errors.EmptyDataError:
            continue  # skip empty files
        x = np.float32(df[["open", "high", "low", "close", "volume"]])
        # keep only all-positive rows, then move to log space
        x = np.log(x[np.all(x > 0, 1)])
        # replace the previous row's price columns with its close so that
        # prices become log-returns vs. the previous close
        base = x[:-1].copy()
        base[..., :4] = base[..., 3:4]
        x = x[1:] - base
        if len(x) > min_rows:
            X_data.append(x)
    return X_data

def get_data():
    """Build the normalized dataset and cache it to disk.

    Reads the test set ("A") and the train set (Data/ETFs + Data/Stocks),
    standardizes every series with the train set's per-feature mean/std
    (in place), then writes:
      - data.npz: [X_data, X_test] as a pickled object array
      - tr.npz:   the normalization mean and std
    """
    X_test = read_A("A")
    X_data = read_folder("Data/ETFs") + read_folder("Data/Stocks")
    # Concatenate once — the original did this twice, doubling peak memory
    # and time on a large corpus.
    flat = np.concatenate(X_data)
    MEAN = flat.mean(0)
    STD = flat.std(0)
    del flat  # release the big temporary before writing files
    for x in X_data:
        x[:] = (x - MEAN) / STD
    # Test data is normalized with TRAIN statistics (no leakage).
    for x in X_test:
        x[:] = (x - MEAN) / STD
    np.savez_compressed("data.npz", [X_data, X_test])
    np.savez_compressed("tr.npz", mean=MEAN, std=STD)
    print(MEAN)
    print(STD)

# Build and cache the preprocessed dataset on first run only.
if not os.path.exists("data.npz"):
    get_data()
# allow_pickle is required because the arrays are ragged (stored as an
# object array). NOTE(review): pickle loading is only safe here because
# data.npz is produced locally by get_data(), never downloaded.
X_data, X_test = np.load("data.npz", allow_pickle=True)['arr_0']

def getBatch(X_data, batch_size=BATCH, t_step=T_STEP, n_feat=N_FEAT):
    """Sample a batch of (input, target) windows from the given series list.

    A series is drawn with probability proportional to its length, then a
    random window of t_step consecutive rows is cut from it; the target is
    the same window shifted one step into the future.

    Returns a pair of float32 tensors, each of shape
    (batch_size, t_step, n_feat).
    """
    lengths = np.array([series.shape[0] for series in X_data])
    xs = np.zeros((batch_size, t_step, n_feat), dtype=np.float32)
    ys = np.zeros((batch_size, t_step, n_feat), dtype=np.float32)
    # Longer series are proportionally more likely to be picked.
    picks = np.random.choice(range(len(lengths)), batch_size, True,
                             lengths / lengths.sum())
    for row, pick in enumerate(picks):
        series = X_data[pick]
        start = np.random.randint(series.shape[0] - t_step)
        xs[row] = series[start:start + t_step]
        ys[row] = series[start + 1:start + t_step + 1]
    return torch.from_numpy(xs), torch.from_numpy(ys)


# Model lives on the GPU; plain SGD with momentum and weight decay.
m = Model().to('cuda')
optim = torch.optim.SGD(m.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-3)
# Cut the learning rate by 10x once, at step 8000 (of 10000 total steps).
schd = torch.optim.lr_scheduler.MultiStepLR(optim, [8000], 0.1)

# import wandb
# wandb.init(project='stockml', entity='yuangzh')
from tensorboardX import SummaryWriter
# Metrics are flushed to the TensorBoard event file every 30 seconds.
writer = SummaryWriter(flush_secs=30)

loss_cum = 0.0
# Fixed held-out batches so validation numbers are comparable across steps.
test_batches = [getBatch(X_test) for _ in range(10)]
for i in range(10000):
    x, y = getBatch(X_data)
    d, hold = m(x.to('cuda'))
    # Per-feature mean log-likelihood of the next step; higher is better.
    loss = d.log_prob(y.to('cuda')).mean((0, 1))
    # BUG FIX: detach() before accumulating — the original kept every
    # step's autograd graph alive in loss_cum, growing memory without bound.
    loss_cum = loss_cum + loss.detach()
    optim.zero_grad()
    torch.mean(-loss).backward()  # minimize the negative log-likelihood
    optim.step()
    schd.step()
    if i and i % 10 == 0:
        m.eval()
        with torch.no_grad():
            val_loss = 0.0
            for x, y in test_batches:
                # BUG FIX: the model returns (distribution, hold) — the old
                # code assigned the whole tuple to d, so d.log_prob raised.
                d, _ = m(x.to('cuda'))
                val_loss = val_loss + d.log_prob(y.to('cuda')).mean((0, 1))
            # NOTE(review): the very first window (i=10) sums 11 steps but
            # divides by 10; kept as-is since it only skews the first point.
            loss_cum = loss_cum / 10
            val_loss = val_loss / 10
            info = {
                "Train/log_prob": loss_cum.mean().item(),
                "Val/log_prob": val_loss.mean().item(),
            }
            print(info)
            for k, v in info.items():
                writer.add_scalar(k, v, i)
        loss_cum = 0.0
        m.train()
torch.save(m.state_dict(), "model.pt")
