import bisect
from math import ceil
import os,torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from typing import Literal

from .tools import StandardScaler, Scaler, ZeroMaxScaler

import warnings
warnings.filterwarnings('ignore')

# Aggregation strategies accepted when collapsing each `sample_range`-sized
# group of raw rows into a single downsampled sample (see TrafficData).
ProcMethod = Literal["mean", "sum", "last", "first"]

class DatasetTrafficConifg:
    """Configuration bundle describing how traffic windows are built.

    NOTE(review): the class name misspells "Config"; kept as-is because
    external callers reference this exact name.
    """
    datatype: str        # Data type tag
    xfea: int            # Number of input features (enc_in)
    xlen: int            # Length of input sequence (input_size)
    yfea: int            # Number of output features (dec_in)
    ylen: int            # Length of output sequence (predict_step)
    xproc: "ProcMethod"  # Sample aggregation method for inputs
    yproc: "ProcMethod"  # Sample aggregation method for outputs
    sr: int              # Sample range (downsampling group size)
    inv: bool            # Denormalize the output (keep targets unscaled)
    nonol: bool          # Non-overlapping windows (stride = ylen instead of 1)
    future: bool         # Predict future steps vs. generate current ones

    def __init__(self, datatype, xfea, xlen, yfea, ylen, xproc, yproc, sr, inv, nonol, future):
        self.datatype = datatype
        self.sr = sr
        self.inv = inv
        self.xfea = xfea
        self.yfea = yfea
        self.xlen = xlen
        self.ylen = ylen
        self.xproc = xproc
        self.yproc = yproc
        self.nonol = nonol
        self.future = future
        # Explicit validation instead of `assert`: asserts are stripped
        # under `python -O` and would silently admit invalid configs.
        if self.xlen < self.ylen:
            raise ValueError(
                f"input length xlen ({self.xlen}) must be >= output length ylen ({self.ylen})"
            )

    @staticmethod
    def fromOpt(opt):
        """Build a config from an argparse-style options object.

        `future` is the negation of `opt.non_future`.
        """
        return DatasetTrafficConifg(
            opt.data, opt.enc_in, opt.input_size, opt.dec_in, opt.predict_step,
            opt.xpm, opt.ypm, opt.sr, opt.inverse, opt.non_overlap, not opt.non_future,
        )

class TrafficData:
    """A single traffic sequence with sliding-window access.

    Loads `input.csv` / `output.csv` from one directory, downsamples both
    by cfg.sr, and serves (x, y, x_stamp, y_stamp) windows via __getitem__.
    The window layout depends on cfg.future (targets follow the input
    window vs. are aligned with its start) and cfg.nonol (windows advance
    by cfg.ylen vs. by 1).
    """
    # Full feature matrix, time-major; columns are [input feats | output feats].
    # data_y starts as a copy of data_x and may stay un-normalized (see transform()).
    data_x: np.ndarray
    data_y: np.ndarray
    @staticmethod
    def __elim(npd:np.ndarray,sample_range:int,proc:ProcMethod) -> np.ndarray:
        """Collapse every `sample_range` consecutive rows into one row using
        `proc` (sum/mean/first/last).  Trailing rows that do not fill a whole
        group (n % sample_range of them) are dropped."""
        n,_=npd.shape
        ret=[]
        for i in range(n//sample_range):
            d0:np.ndarray=npd[sample_range*i:sample_range*(i+1)]
            if proc=='sum':
                ret.append(d0.sum(axis=0))
            elif proc=='mean':
                ret.append(d0.mean(axis=0))
            elif proc=='first':
                ret.append(d0[0])
            elif proc=='last':
                ret.append(d0[-1])
        ret=np.stack(ret,axis=0)
        return ret

    # Window-count helpers.  Naming: f/nf = future/non-future targets,
    # o/no = overlapping (stride 1) / non-overlapping (stride = ylen) windows.
    def __len_f_o(self): return len(self.data_x) - self.cfg.xlen - self.cfg.ylen + 1
    def __len_f_no(self): return (len(self.data_x) - self.cfg.xlen) // self.cfg.ylen
    def __len_nf_o(self): return len(self.data_x) - self.cfg.xlen + 1
    def __len_nf_no(self): return (len(self.data_x) - self.cfg.xlen) // self.cfg.ylen + 1

    def __get_nf_o(self, index):
        # Non-future, overlapping: y spans the first ylen steps of the input
        # window; only output features (columns in_fea:) are returned for y.
        p = index + self.cfg.xlen
        q = index + self.cfg.ylen
        return self.data_x[index:p], self.data_y[index:q,self.in_fea:], self.data_stamp[index:p], self.data_stamp[index:q]
    
    def __get_nf_no(self, index):
        # Non-future, non-overlapping: same layout as __get_nf_o but window
        # starts advance in steps of ylen.
        l = index * self.cfg.ylen
        rx = l + self.cfg.xlen
        ry = l + self.cfg.ylen
        return self.data_x[l:rx], self.data_y[l:ry,self.in_fea:], self.data_stamp[l:rx], self.data_stamp[l:ry]
    
    def __get_f_o(self, index):
        # Future, overlapping: y covers the ylen steps immediately AFTER the
        # input window.
        p = index + self.cfg.xlen
        end = p + self.cfg.ylen
        return self.data_x[index:p], self.data_y[p:end,self.in_fea:], self.data_stamp[index:p], self.data_stamp[p:end]
    
    def __get_f_no(self, index):
        # Future, non-overlapping: like __get_f_o with window starts advancing
        # in steps of ylen.
        l = index * self.cfg.ylen
        m = l + self.cfg.xlen
        r = m + self.cfg.ylen
        return self.data_x[l:m], self.data_y[m:r,self.in_fea:], self.data_stamp[l:m], self.data_stamp[m:r]
    
    def __init__(self, root_path:str, cfg:DatasetTrafficConifg):
        """Load and downsample one sequence from `root_path`.

        Expects `input.csv` and `output.csv` there, both led by a 'time'
        column (presumably with identical timestamps in both files —
        only input.csv's are used; verify against the data).
        """
        self.cfg = cfg
        self.root_path = root_path

        df_in = pd.read_csv(os.path.join(self.root_path,"input.csv"))
        df_out = pd.read_csv(os.path.join(self.root_path,"output.csv"))

        # Keep the first timestamp of every sr-sized group, matching the
        # grouping performed by __elim below.
        self.org_stamp = np.array(df_in['time'])[::self.cfg.sr]
        # Encode each stamp as (minute-of-hour, hour-of-day, day-index),
        # each shifted/scaled to roughly [-1, 1].
        # NOTE(review): assumes 'time' is an integer minute count and the
        # day index (x//1440) stays within 0..6 — confirm with the data files.
        self.data_stamp = np.array([((x%60-30)/30.0,((x//60)%24-12)/12.0,(x//1440-3)/3.0) for x in self.org_stamp])

        df_in.pop("time"); df_out.pop("time")
        # Downsample inputs and outputs separately, then stack column-wise:
        # data_x columns = [in_fea input features | out_fea output features].
        p1=self.__elim(df_in.to_numpy(),self.cfg.sr,self.cfg.xproc)
        self.in_fea=p1.shape[1]
        p2=self.__elim(df_out.to_numpy(),self.cfg.sr,self.cfg.yproc)
        self.out_fea=p2.shape[1]
        self.data_x=np.concatenate([p1,p2],axis=1)
        self.data_y=self.data_x.copy()

        # Select the __len__/__getitem__ implementation once up front,
        # avoiding a branch on cfg flags at every access.
        f = 1 if self.cfg.future else 0
        o = 0 if self.cfg.nonol else 1
        self.__my_len = ((self.__len_nf_no,self.__len_nf_o),(self.__len_f_no,self.__len_f_o))[f][o]
        self.__my_getitem = ((self.__get_nf_no,self.__get_nf_o),(self.__get_f_no,self.__get_f_o))[f][o]

    def get_data_x(self,l:int,r:int):
        """Return the rows whose original timestamps fall in [l, r), or None
        when that range is empty.
        NOTE(review): bisect requires org_stamp sorted ascending — verify
        that the CSVs are time-ordered."""
        l=bisect.bisect_left(self.org_stamp,l)
        r=bisect.bisect_left(self.org_stamp,r)
        if l<r: return self.data_x[l:r]
        return None
    
    # Both dispatch through the implementation chosen in __init__.
    def __len__(self): return self.__my_len()

    def __getitem__(self, index:int): return self.__my_getitem(index)
    
    def transform(self,scaler:Scaler):
        """Normalize data_x in place with `scaler`; data_y is normalized too
        unless cfg.inv requests raw (denormalizable) targets."""
        self.data_x = scaler.transform(self.data_x)
        if not self.cfg.inv: self.data_y = scaler.transform(self.data_y)

class Dataset_Traffic(Dataset):
    """Concatenation of several TrafficData sequences behind one Dataset API.

    Each subdirectory of `root_path` is loaded as one TrafficData; the
    directory list is split into train/val/test partitions, a scaler is
    fitted on the selected partition's data, and every sequence is then
    normalized in place with it.
    """

    def __init__(self, root_path:str, opt, flag:str='train'):
        # Explicit validation instead of `assert` (stripped under -O).
        if flag not in ('train', 'val', 'test'):
            raise ValueError(f"flag must be 'train', 'val' or 'test', got {flag!r}")
        self.set_type = {'train': 0, 'val': 1, 'test': 2}[flag]
        self.root_path = root_path
        cfg = DatasetTrafficConifg.fromOpt(opt)

        # NOTE(review): os.listdir order is OS-dependent, so the split may
        # differ across machines — consider sorting `dirs` for reproducibility.
        dirs = os.listdir(self.root_path)
        ld = len(dirs)
        if ld < 3:
            raise ValueError("数据集太少, 无法分割成train, test和val")
        # ~15% of the directories (at least one) each for val and test;
        # the remainder is the training partition.
        prop = max(round(ld * 0.15), 1)
        ld1 = ld - prop * 2
        ld2 = ld - prop
        border1 = [0,   ld1, ld2][self.set_type]
        border2 = [ld1, ld2, ld ][self.set_type]
        self.data: 'list[TrafficData]' = []
        for idx in tqdm(dirs[border1:border2], mininterval=2, desc='  - (Loading)   ', leave=False):
            self.data.append(TrafficData(os.path.join(self.root_path, idx), cfg))

        # NOTE(review): flat indexing below assumes every sequence yields the
        # same number of windows as the first one — confirm all directories
        # hold equally long series.
        self._slen = len(self.data)
        self._len0 = len(self.data[0])
        self._lenA = self._len0 * self._slen

        train = np.concatenate([self.data[i].data_x for i in range(self._slen)], axis=0)
        self.in_fea = self.data[0].in_fea
        self.out_fea = self.data[0].out_fea

        if opt.scaler == "0max":
            self.scaler = ZeroMaxScaler()
        elif opt.scaler == "std":
            self.scaler = StandardScaler()
        else:
            raise ValueError(f"Invalid scaler: {opt.scaler}. Only '0max' and 'std' are supported.")

        self.scaler.fit(train)

        for i in range(self._slen):
            self.data[i].transform(self.scaler)

    def __getitem__(self, index:int):
        # Map the flat index onto (sequence, window-within-sequence).
        seq, offset = divmod(index, self._len0)
        return self.data[seq][offset]

    def __len__(self):
        return self._lenA

    def inverse_transform(self, data:torch.Tensor, seq_y:torch.Tensor) -> 'tuple[torch.Tensor,torch.Tensor]':
        """Denormalize model output `data`; targets `seq_y` pass through unchanged."""
        return self.scaler.inverse_transform(data), seq_y
    
def get_all_v(train_data, train_end, seq_len, pred_len, window_stride, type):
    """Get the normalization parameter of each sliding training window.

    Args:
        train_data: tensor of shape (seq_num, T) holding the sequences.
        train_end: end index of the usable training region per sequence.
        seq_len: input window length.
        pred_len: prediction horizon reserved after each window.
        window_stride: step between consecutive window starts.
        type: 'mean' -> per-window mean + 1; anything else -> per-window std.
              (Shadows the builtin `type`, but renaming it would break
              callers passing it by keyword.)

    Returns:
        1-D float64 tensor of length seq_num * window_per_seq.
    """
    seq_num = train_data.size(0)
    window_per_seq = (train_end - seq_len - pred_len) // window_stride
    window_number = seq_num * window_per_seq

    v = torch.zeros(window_number, dtype=torch.float64)
    for index in range(window_number):
        seq_idx, window_idx = divmod(index, window_per_seq)

        s_begin = window_idx * window_stride
        s_end = s_begin + seq_len

        # No .clone() needed: mean()/std() never mutate their input.
        seq_x = train_data[seq_idx, s_begin:s_end]
        if type == 'mean':
            # +1 presumably guards against division by zero downstream —
            # confirm against the caller's normalization code.
            v[index] = seq_x.mean() + 1
        else:
            v[index] = seq_x.std()

    return v


def gen_covariates(times, num_covariates):
    """Build a (len(times), num_covariates) matrix of calendar covariates.

    Column 0 holds weekday/7, column 1 hour/24, column 2 month/12; any
    additional columns remain zero.
    """
    covariates = np.zeros((times.shape[0], num_covariates))
    for row, stamp in enumerate(times):
        weekday, hour, month = stamp.weekday(), stamp.hour, stamp.month
        covariates[row, 0] = weekday / 7
        covariates[row, 1] = hour / 24
        covariates[row, 2] = month / 12
    return covariates