from torch.utils.data import Dataset, DataLoader
import os
import csv
from torch import Tensor

class StoDataset(Dataset):
    """Regression dataset of day-over-day stock-price ratios.

    Reads `sto.csv` (columns Open/Max/Min/Close) from this module's
    directory and converts each consecutive pair of days into a ratio
    row `d1[i] / d0[i]`.  Each sample is a pair:

    - input:  Tensor of shape (input_sequence_length, 4) — a window of
      consecutive ratio rows,
    - target: Tensor of shape (1,) — the Open ratio of the day that
      immediately follows the window.
    """

    def __init__(self, input_sequence_length=6):
        # Set the window length first so the object is fully configured
        # before any derived state is built (the original assigned it
        # after init_train_data(), which worked only by accident).
        self.input_sequence_length = input_sequence_length
        self.origin_data = []
        # sto.csv lives next to this module; join the path portably
        # instead of concatenating with os.sep and a relative './'.
        csv_path = os.path.join(os.path.dirname(__file__), 'sto.csv')
        with open(csv_path, 'r') as fp:
            for row in csv.DictReader(fp):
                self.origin_data.append([
                    float(row['Open']),
                    float(row['Max']),
                    float(row['Min']),
                    float(row['Close']),
                ])
        self.init_train_data()

    def init_train_data(self):
        """Build `train_data`: per-column ratios of consecutive days."""
        self.train_data = []
        for idx in range(len(self.origin_data) - 1):
            d0 = self.origin_data[idx]
            d1 = self.origin_data[idx + 1]
            self.train_data.append([d1[i] / d0[i] for i in range(len(d0))])

    def __len__(self):
        # Clamp at zero: with fewer than input_sequence_length + 1 ratio
        # rows the original returned a negative length, which DataLoader
        # rejects with a ValueError.
        return max(0, len(self.train_data) - self.input_sequence_length)

    def __getitem__(self, idx):
        end = idx + self.input_sequence_length
        input_sequence = self.train_data[idx:end]
        # Slice (not index) keeps the target as a 1-row list so the
        # returned target tensor has shape (1,), as before.
        output_sequence = self.train_data[end:end + 1]
        return Tensor(input_sequence), Tensor([y[0] for y in output_sequence])

class StoDataset2(Dataset):
    """Classification variant of the ratio dataset.

    Same CSV loading and ratio windows as the regression dataset, but
    the target is a 3-way +1/-1 label over the next day's Open ratio:
    [up, down, flat] — i.e. ratio > 1, ratio < 1, ratio == 1.
    """

    def __init__(self, input_sequence_length=6):
        # Configure the window length before building derived state.
        self.input_sequence_length = input_sequence_length
        self.origin_data = []
        # Portable path join instead of os.sep concatenation.
        csv_path = os.path.join(os.path.dirname(__file__), 'sto.csv')
        with open(csv_path, 'r') as fp:
            for row in csv.DictReader(fp):
                self.origin_data.append([
                    float(row['Open']),
                    float(row['Max']),
                    float(row['Min']),
                    float(row['Close']),
                ])
        self.init_train_data()

    def init_train_data(self):
        """Build `train_data`: per-column ratios of consecutive days."""
        self.train_data = []
        for idx in range(len(self.origin_data) - 1):
            d0 = self.origin_data[idx]
            d1 = self.origin_data[idx + 1]
            self.train_data.append([d1[i] / d0[i] for i in range(len(d0))])

    def __len__(self):
        # Clamp at zero so a too-short file gives an empty dataset
        # rather than a negative length.
        return max(0, len(self.train_data) - self.input_sequence_length)

    def __getitem__(self, idx):
        end = idx + self.input_sequence_length
        input_sequence = self.train_data[idx:end]
        ratio = self.train_data[end][0]
        # if/elif/else guarantees `result` is always bound; the original
        # three independent `if`s left it undefined (NameError) when the
        # ratio was NaN.  A NaN ratio now falls into the "flat" class.
        if ratio > 1:
            result = [1, -1, -1]
        elif ratio < 1:
            result = [-1, 1, -1]
        else:
            result = [-1, -1, 1]
        return Tensor(input_sequence), Tensor(result)

def iter_data(batch_size=1):
    """Yield (input, target) batches from StoDataset.

    The original unpacked and re-packed each (x, y) pair in a manual
    loop; `yield from` delegates to the DataLoader directly.
    """
    yield from DataLoader(StoDataset(), batch_size=batch_size)

def iter_data2(batch_size=1):
    """Yield (input, label) batches from StoDataset2.

    `yield from` replaces the original's redundant unpack/re-yield loop.
    """
    yield from DataLoader(StoDataset2(), batch_size=batch_size)

def show_dataset():
    """Print the first (x, y) batch of the regression dataset and stop."""
    for features, target in iter_data(batch_size=1):
        print('x:', features)
        print('y:', target)
        print('(batch_size=1)')
        break

def show_dataset2():
    """Print the first (x, y) batch of the classification dataset and stop."""
    for features, label in iter_data2(batch_size=1):
        print('x:', features)
        print('y:', label)
        print('(batch_size=1)')
        break

if __name__ == '__main__':
    # Smoke-test both dataset variants by printing one batch from each.
    for demo_fn in (show_dataset, show_dataset2):
        demo_fn()
