import pandas as pd
import numpy as np
import torch
import math

from index_factors import df as data


def data_eval(seq_length=20, df=None):
    """Return the most recent `seq_length` rows of features as a single
    evaluation batch.

    Args:
        seq_length: number of trailing rows to take from the frame.
        df: optional DataFrame to read from; defaults to the module-level
            `data` frame for backward compatibility.

    Returns:
        torch.FloatTensor of shape (1, n_features, seq_length) — every
        column except the last (assumed to be the label) transposed so
        features are the leading axis.
    """
    frame = data if df is None else df
    # Window of the last `seq_length` rows, features only, transposed to
    # (n_features, seq_length); wrap in a leading batch axis of size 1.
    window = np.array(frame.iloc[-seq_length:, :-1].T)
    batch = np.expand_dims(window, axis=0)
    return torch.from_numpy(batch.astype(np.float32))

def data_generator(split=0.3, bs=1, seq_length=20):
    """Split the module-level `data` frame into train/test DataLoaders.

    Rows are split at index `int(rows * split)` without shuffling, so the
    split preserves temporal order.

    Args:
        split: fraction of rows (from the top) used for the training set.
        bs: batch size passed to each DataLoader.
        seq_length: sliding-window length passed to each DataLoader.

    Returns:
        (train_loader, test_loader) tuple of DataLoader instances.
    """
    df = data
    # NOTE(review): the FIRST `split` fraction becomes the train set, so
    # with the default split=0.3 the training set is the smaller part —
    # confirm this is intentional and not a reversed split.
    index_sp = int(df.shape[0] * split)
    df_train = df[:index_sp]
    df_test = df[index_sp:]
    return DataLoader(df_train, bs, seq_length), DataLoader(df_test, bs, seq_length)


# def data_eval(bs=1):
#     info = []
#     with open(path_sub_txt, 'r') as f:
#         for l in f.readlines():
#             row = l.rstrip().split('\t')
#             info.append(row[:3])
#     df_info = pd.DataFrame(info, columns=['file', 'age', 'gender'])
#
#     fo = open(path_arrythmia, "r", encoding='utf-8')
#     hash_tag = {}
#     i = 0
#     for line in fo.readlines():  # read the file line by line
#         line = line.strip()  # strip leading/trailing whitespace from each line
#         hash_tag[i] = line
#         i += 1
#     return DataLoaderEval(df_info, bs), hash_tag, df_info


class DataLoader():
    """Iterates over a feature DataFrame in sliding windows of length
    `seq_length`, yielding (features, labels) float32 tensor batches.

    Each sample is the window df[ia:ia+seq_length]: all columns except
    the last are transposed to shape (n_features, window_length), and the
    label is the last column's value at the window's final row. Batches
    stack `batch_size` consecutive windows (stride 1).
    """

    def __init__(self, df, batch_size=1, seq_length=20):
        self.time = ''
        self.df = df
        self.batch_size = batch_size
        # Number of batches of stride-1 windows after reserving
        # seq_length rows so the first window can be full-length.
        self.nB = math.ceil((df.shape[0] - seq_length) / batch_size)
        self.seq_length = seq_length
        assert self.nB > 0, 'Dataset is empty'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        # NOTE(review): iteration stops after (nB - 1) * batch_size
        # samples, so only nB - 1 batches are yielded even though
        # __len__ reports nB — confirm whether skipping the final batch
        # is intentional.
        if self.count == (self.nB - 1) * self.batch_size:
            raise StopIteration

        feat_all = []
        labels_all = []
        for _ in range(self.batch_size):
            ia = self.count
            # Clamp the window end so a trailing window never reads past
            # the last row.
            ib = min(ia + self.seq_length, self.df.shape[0])

            # Features: every column but the last, transposed to
            # (n_features, window_length).
            feat_all.append(np.array(self.df.iloc[ia: ib, :-1].T))
            # Label: last column at the final row of the window.
            labels_all.append(np.array(self.df.iloc[ib - 1, -1]))
            self.count += 1

        feat_all = np.array(feat_all)
        labels_all = np.array(labels_all)
        return torch.from_numpy(feat_all.astype(np.float32)), torch.from_numpy(labels_all.astype(np.float32))

    def __len__(self):
        return self.nB  # number of batches


# class DataLoaderEval():
#     def __init__(self, df_y, batch_size=1):
#         self.time = ''
#         self.df_y = df_y
#         self.batch_size = batch_size
#         self.nB = math.ceil(df_y.shape[0] / batch_size)
#         assert self.nB > 0, 'Dataset is empty'
#
#     def __iter__(self):
#         self.count = -1
#         return self
#
#     def __next__(self):
#         self.count += 1
#         if self.count == self.nB:
#             raise StopIteration
#
#         ia = self.count * self.batch_size
#         ib = min((self.count + 1) * self.batch_size, self.df_y.shape[0])
#
#         feat_all = []
#         for index, files_index in enumerate(range(ia, ib)):
#             df = pd.read_csv(path_head + path_submitA + self.df_y.iloc[files_index, 0], sep=' ')
#             if df is None:
#                 continue
#             df['III'] = df['II'] - df['I']
#             df['aVR'] = -(df['II'] + df['I']) / 2
#             df['aVL'] = (df['I'] - df['II']) / 2
#             df['aVF'] = (df['II'] - df['I']) / 2
#             feat_all.append(np.array(df))
#
#         feat_all = np.array(feat_all)
#         return torch.from_numpy(feat_all.astype(np.float32)), self.df_y.iloc[ia:ib, :]
#
#     def __len__(self):
#         return self.nB  # number of batches

