from prefect import flow, task
import time
import torch
import torch.nn as nn
import pandas as pd
pd.options.mode.chained_assignment = None
import math
import os
import datetime as dt
from sqlalchemy import create_engine
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, DataLoader
from tensorboard import program

class ForecastModel(nn.Module):
    """Six-layer fully connected regressor with alternating Tanh/ReLU activations.

    Architecture: input -> 64 -> 64 -> 64 -> 64 -> 64 -> output, where hidden
    layers 1/3/5 are followed by Tanh and layers 2/4 by ReLU.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        # Attribute names (linear1..linear6, activation1..activation5) and the
        # creation order of the Linear layers are kept stable so state_dict keys
        # and weight initialization match exactly.
        fan_in = [input_size, 64, 64, 64, 64]
        act_types = [nn.Tanh, nn.ReLU, nn.Tanh, nn.ReLU, nn.Tanh]
        for idx, (width, act_cls) in enumerate(zip(fan_in, act_types), start=1):
            setattr(self, f'linear{idx}', nn.Linear(width, 64))
            setattr(self, f'activation{idx}', act_cls())
        self.linear6 = nn.Linear(64, output_size)

    def forward(self, x):
        # Run the five hidden (linear, activation) pairs, then the output layer.
        for idx in range(1, 6):
            x = getattr(self, f'linear{idx}')(x)
            x = getattr(self, f'activation{idx}')(x)
        return self.linear6(x)
class CreateDataset(Dataset):
    """In-memory Dataset wrapping numpy feature/target arrays as float32 tensors.

    Targets are reshaped to a column vector of shape (N, 1) so they align with
    the model's single-output predictions.
    """

    def __init__(self, x_numpy, y_numpy):
        self.n_samples = x_numpy.shape[0]
        self.x = torch.tensor(x_numpy, dtype=torch.float32)
        targets = torch.tensor(y_numpy, dtype=torch.float32)
        self.y = targets.view(targets.shape[0], 1)

    def __getitem__(self, index):
        return (self.x[index], self.y[index])

    def __len__(self):
        return self.n_samples

@task()
def pytorchcsforecast_task(j: int, dataset: pd.DataFrame, cuda0: torch.device, writer: SummaryWriter, i, s: torch.cuda.Stream):
    """Train one per-cluster sales-forecast model on its own CUDA stream.

    Parameters:
        j: 1-based cluster id; also used to stagger this task's start time.
        dataset: normalized per-store data containing a 'ClusterId' column.
        cuda0: CUDA device the model and train/test tensors are placed on.
        writer: shared TensorBoard SummaryWriter for loss/hparam logging.
        i: one row of the store list; i[0] is the store code (last 3 chars
           are used as a numeric store id in file names and hparams).
        s: CUDA stream the GPU work of this task is queued on.

    Side effects: saves/loads a model checkpoint on disk, moves it to the
    models folder, and writes the cluster's predictions to an Excel file.
    """
    # Stagger startup (10 s per cluster) so concurrent tasks don't all hit
    # the GPU and allocator at once.
    time.sleep((j-1)*10)
    print(f'Кластер {j}')
    # Rows of this cluster only, then a full shuffle (sample(frac=1)).
    dataset_cluster = dataset[dataset['ClusterId'] == j].copy()
    dataset_cluster_after_frac = dataset_cluster.sample(frac=1)
    # Features = all columns except identifiers and the 'Sales' target.
    x_numpy = dataset_cluster_after_frac.drop(columns=['DateCode', 'StoreCode', 'ItemCode', 'ColorCode', 'ClusterId', 'Sales']).to_numpy()
    y_numpy = dataset_cluster_after_frac['Sales'].to_numpy()
    with torch.cuda.stream(s):
        # 90/10 train/test split over the already-shuffled rows.
        train_ds = CreateDataset(x_numpy[:math.ceil(len(x_numpy)*0.9)], y_numpy[:math.ceil(len(y_numpy)*0.9)])
        train_ds.x = train_ds.x.to(cuda0, non_blocking=True)
        train_ds.y = train_ds.y.to(cuda0, non_blocking=True)
        test_ds = CreateDataset(x_numpy[math.ceil(len(x_numpy)*0.9):], y_numpy[math.ceil(len(y_numpy)*0.9):])
        test_ds.x = test_ds.x.to(cuda0, non_blocking=True)
        test_ds.y = test_ds.y.to(cuda0, non_blocking=True)
        # full_ds stays on CPU; it is used for the final predictions below.
        full_ds = CreateDataset(x_numpy, y_numpy)
        model = ForecastModel(x_numpy.shape[1], 1).to(cuda0)
        criterion = nn.MSELoss().to(cuda0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
        # Early stopping: abort after 5000 epochs without a new best test loss.
        patience = 5000
        print(f'torch.cuda.memory_allocated() = {torch.cuda.memory_allocated()}')
        for epoch in range(200000):
            # Full-batch gradient step (no DataLoader mini-batching).
            model.train()
            y_predicted = model(train_ds.x)
            loss = criterion(y_predicted, train_ds.y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            model.eval()
            curr_loss = criterion(model(test_ds.x).detach(), test_ds.y).item()
            if epoch == 0:
                # ampl accumulates the total absolute movement of the test loss;
                # dvizh is the movement on the most recent epoch only.
                ampl = 0
                dvizh = 0
                last_loss = curr_loss
            else:
                ampl += abs(curr_loss - last_loss)
                dvizh = abs(curr_loss - last_loss)
                last_loss = curr_loss
            writer.add_scalar(f'{i[0]}/cluster_{j}', curr_loss, epoch+1)
            writer.flush()
            patience -= 1
            if epoch == 0 or curr_loss < min_loss:
                # New best test loss: checkpoint the whole module and reset patience.
                min_loss = curr_loss
                min_ampl = ampl
                patience = 5000
                torch.save(model, str(i[0])[-3:]+'_'+str(j)+'.pth')
            # NOTE(review): besides patience, training also aborts when the
            # cumulative loss movement exceeds 5000, or when after epoch 5000 a
            # single-epoch change is >1% of the current loss — confirm these
            # thresholds and directions are intended.
            if patience == 0 or ampl > 5000 or (epoch > 5000 and dvizh / curr_loss > 0.01):
                break
    # Reload the best checkpoint (a fully pickled module; trusted local file)
    # and move it to CPU for inference over the whole cluster.
    best_model = torch.load(str(i[0])[-3:]+'_'+str(j)+'.pth')
    best_model.eval().to('cpu')
    data_final = dataset_cluster_after_frac.copy()
    data_final['Sales_Predicted'] = best_model(full_ds.x).detach().numpy()
    best_loss = min_loss
    best_ampl = min_ampl
    writer.add_hparams({'Store': int(str(i[0])[-3:])}, {'Cluster': j, 'Best_ampl': best_ampl, 'Loss': best_loss})
    writer.flush()
    # Move the checkpoint to the models folder and dump predictions to Excel.
    os.replace('F:/PyCharm/PerfectOrchestrator/DAGs/'+str(i[0])[-3:]+'_'+str(j)+'.pth', 'F:/PyCharm/RL/3) Прогнозирование продаж/Модели/'+str(i[0])[-3:]+'_'+str(j)+'.pth')
    data_final.to_excel(f'F:/PyCharm/RL/3) Прогнозирование продаж/Результаты/Магазины/{i[0]}/Прогноз_{j}.xlsx', index=False)
@flow(log_prints=True)
def pytorchcsforecast(dataset, cuda0, writer, i, list_of_streams):
    """Fan out one training task per cluster, each on its own CUDA stream.

    Cluster ids are 1-based: stream ``list_of_streams[k]`` serves cluster
    ``k + 1``, exactly as in the previous hard-coded call list. Works for any
    number of streams (the caller currently passes 84).

    Parameters:
        dataset: normalized per-store data with a 'ClusterId' column.
        cuda0: CUDA device forwarded to every task.
        writer: shared TensorBoard SummaryWriter.
        i: one row of the store list (i[0] is the store code).
        list_of_streams: one torch.cuda.Stream per cluster.
    """
    # Replaces 84 copy-pasted submit calls with a single loop.
    for cluster_id, stream in enumerate(list_of_streams, start=1):
        pytorchcsforecast_task.submit(cluster_id, dataset, cuda0, writer, i, stream)
@flow(log_prints=True)
def pytorchcs():
    """Top-level flow: per store, load data from SQL Server, z-score the
    numeric features, then launch per-cluster training via pytorchcsforecast.

    Also launches an in-process TensorBoard server over the shared log dir.
    """
    time1 = dt.datetime.now()
    print(f'Дата и время старта: {time1}')
    cuda0 = torch.device('cuda:0')
    # NOTE(review): the braces in the ODBC URL are passed through literally —
    # confirm '{PESANSQL01-DC}' / '{Python}' is the intended host/db spelling.
    engine = create_engine('mssql+pyodbc://{PESANSQL01-DC}/{Python}?trusted_connection=yes&driver=SQL+Server+Native+Client+11.0', fast_executemany=True)
    tracking_address = 'F:/PyCharm/PerfectOrchestrator/DAGs/TensorBoardLogs'
    # Launch TensorBoard inside this process, serving the log directory.
    tensorboard = program.TensorBoard()
    tensorboard.configure(argv=[None, '--logdir', tracking_address])
    tensorboard_url = tensorboard.launch()
    print(f'Tensorboard listening on {tensorboard_url}')
    writer = SummaryWriter(f'TensorBoardLogs/{dt.datetime.now().strftime("%Y-%m-%d")}_1')
    normalization_type = 'store'
    # Store codes to process (currently pinned to the single store 'МАГ599').
    StoreList = pd.read_sql("select distinct [StoreCode] from [PESANSQL01-DC].[Python].[dbo].[RL_Forecast] with (nolock) where [StoreCode] in ('МАГ599')", con=engine).to_numpy()
    for i in StoreList:
        print(f'Магазин {i[0]}')
        # Store code comes from our own table above; still, the query is built
        # via f-string — parameterize if codes ever come from untrusted input.
        dataset = pd.read_sql(f"select * from [PESANSQL01-DC].[Python].[dbo].[RL_Forecast] with (nolock) where [StoreCode] = '{i[0]}'", con=engine)
        if not os.path.isdir(f'F:/PyCharm/RL/3) Прогнозирование продаж/Результаты/Магазины/{i[0]}'):
            os.mkdir(f'F:/PyCharm/RL/3) Прогнозирование продаж/Результаты/Магазины/{i[0]}')
        if normalization_type == 'store':
            # Set aside identifier columns, z-score the numeric features,
            # then re-attach the identifiers afterwards.
            dataset_datecode = dataset['DateCode'].to_numpy()
            dataset_storecode = dataset['StoreCode'].to_numpy()
            dataset_itemcode = dataset['ItemCode'].to_numpy()
            dataset_colorcode = dataset['ColorCode'].to_numpy()
            dataset_clusterid = dataset['ClusterId'].to_numpy()
            dataset = dataset.drop(columns=['DateCode', 'StoreCode', 'ItemCode', 'ColorCode', 'ClusterId'])
            # Mean/std snapshot per column, saved so predictions can later be
            # de-normalized. The +1e-5 guards against zero-variance columns.
            df_mean = dataset.mean()
            df_std = dataset.std(ddof=0) + 1e-5
            dataset_normal = pd.DataFrame({'Parameter': df_mean.index, 'Mean': df_mean.values, 'Std': df_std.values})
            dataset_normal.to_excel(f'F:/PyCharm/RL/3) Прогнозирование продаж/Результаты/Магазины/{i[0]}/Нормализация на магазин.xlsx', index=False)
            # NOTE(review): 'Discount' is included in the snapshot above but
            # dropped before normalization, so it never reaches the model —
            # confirm this asymmetry is intended.
            dataset = dataset.drop(columns=['Discount'])
            dataset = (dataset - dataset.mean()) / (dataset.std(ddof=0) + 1e-5)
            dataset['DateCode'] = dataset_datecode
            dataset['StoreCode'] = dataset_storecode
            dataset['ItemCode'] = dataset_itemcode
            dataset['ColorCode'] = dataset_colorcode
            dataset['ClusterId'] = dataset_clusterid
        # One CUDA stream per cluster task (84 clusters).
        list_of_streams = []
        for s in range(84):
            list_of_streams.append(torch.cuda.Stream())
        pytorchcsforecast(dataset, cuda0, writer, i, list_of_streams)
        print(f'Обучение закончено, текущее время: {dt.datetime.now()}, времени затрачено: {dt.datetime.now() - time1}')
        # NOTE(review): closing the writer and sleeping ~11.5 days happen
        # INSIDE the store loop, so only the first store is ever processed and
        # the writer is unusable afterwards — confirm this is intentional
        # (e.g. to keep TensorBoard alive) rather than a misplaced dedent.
        writer.close()
        time.sleep(999999)

# Entry point: run the top-level Prefect flow when executed as a script.
if __name__ == "__main__":
    pytorchcs()
