import os
import re
import time
import numpy as np
import pandas as pd
import random
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from utils import tools
import torch
from sklearn import preprocessing

def isdb_prepare(
        root_path='/home/kexin/phdwork/work4-tii/data',
        data_path='Isdb',
        data_name='raw_isdb',
        maxlength=400):
    """Convert the raw ISDB loop CSVs into two preprocessed .npy dictionaries.

    Reads per-signal CSVs from '<root>/<data_path>/<data_name>/stiction_loops'
    and '.../nonstiction_loops', stacks each loop's OP/PV/SP series into one
    (n, 3) array, length-wraps and noise-filters it with
    tools.SequencePreprocessing, and saves {loopname: array} dicts (OP and PV
    columns only) to '<root>/<data_path>/isdb/sticdata.npy' and
    '.../nonsticdata.npy'. Per-loop metadata (sampling period, sample count)
    is written to '<root>/<data_path>/isdb/loopinfo.csv'.

    Parameters
    ----------
    root_path, data_path, data_name : str
        Path components of the raw-data directory.
    maxlength : int
        Target sequence length passed to SequencePreprocessing.
    """
    base = os.path.join(root_path, data_path, data_name)

    # Stiction loops: one CSV per signal (e.g. 'loop1.OP.csv').
    stictiondata = _read_loop_dir(os.path.join(base, 'stiction_loops'))
    stictionloop = _unique_loop_names(stictiondata)

    # Non-stiction (healthy) loops.
    normaldata = _read_loop_dir(os.path.join(base, 'nonstiction_loops'))
    normalloop = _unique_loop_names(normaldata)

    # Per-loop metadata: sampling period Ts and number of samples.
    loopinfo = _read_loopinfo(os.path.join(base, 'loopinfo'))
    loopinfo.to_csv(os.path.join(root_path, data_path, 'isdb/loopinfo.csv'))

    # Stack each loop's three signals into a single (n, 3) array.
    stictiondatadict = _stack_signals(stictiondata, stictionloop)
    # NOTE: original message had a typo ('Siction'); fixed here.
    print('-> Number of Stiction Loops:', len(list(stictiondatadict.keys())))

    normaldatadict = _stack_signals(normaldata, normalloop)
    print('-> Number of Nonstiction Loops:', len(list(normaldatadict.keys())))

    # Length-wrap + denoise every loop, keeping only the OP and PV columns.
    data_preprocess = tools.SequencePreprocessing(expected_length=maxlength)
    sticf = {k: _filter_signals(data_preprocess, v)
             for k, v in stictiondatadict.items()}
    normf = {k: _filter_signals(data_preprocess, v)
             for k, v in normaldatadict.items()}

    # Save as np
    np.save(os.path.join(root_path, data_path, 'isdb/sticdata.npy'), sticf)
    np.save(os.path.join(root_path, data_path, 'isdb/nonsticdata.npy'), normf)


def _read_loop_dir(dirpath):
    """Read every CSV in *dirpath* into {filename-stem: single-column DataFrame}.

    The first four rows of each file are metadata and are skipped. Unlike the
    original code, the process working directory is left untouched (the old
    os.chdir calls had no effect on the absolute paths used below).
    """
    filelist = os.listdir(dirpath)
    data = {}
    for fname in filelist:
        datapath = os.path.join(dirpath, fname)
        stem = os.path.basename(datapath)[0:-4]  # drop the '.csv' suffix
        data[stem] = pd.read_csv(datapath, header=None, skiprows=4,
                                 engine='python', names=[stem])
    print('-> {num} files in: {path}'.format(num=len(filelist), path=dirpath))
    return data


def _unique_loop_names(signal_dict):
    """Collapse signal keys like 'loop1.OP'/'loop1.PV'/'loop1.SP' to loop names."""
    return list({key[0:-3] for key in signal_dict})


def _read_loopinfo(dirpath):
    """Parse each loop's info text file into a (loopname, ts, samplenum) row."""
    filelist = os.listdir(dirpath)
    loopinfo = pd.DataFrame(columns=['loopname', 'ts', 'samplenum'])
    # Raw strings: the original non-raw '\d' pattern is an invalid escape
    # sequence (SyntaxWarning on Python 3.12+).
    ts_line = re.compile(r'Ts: \d.+|Ts: \d+')    # line holding the sample period
    ts_value = re.compile(r'\d.+|\d+')
    num_line = re.compile(r'PV: \[\d+')          # line holding the sample count
    num_value = re.compile(r'\d+')
    for i, fname in enumerate(filelist):
        datapath = os.path.join(dirpath, fname)
        with open(datapath, 'r') as f:
            lines = f.readlines()
        # The first line matching each pattern carries the value we need.
        ts_match = next(m for m in map(ts_line.search, lines) if m)
        num_match = next(m for m in map(num_line.search, lines) if m)
        loopinfo.loc[i, 'loopname'] = os.path.basename(datapath)[0:-4]
        loopinfo.loc[i, 'ts'] = ts_value.search(ts_match.group(0)).group(0)
        loopinfo.loc[i, 'samplenum'] = num_value.search(num_match.group(0)).group(0)
    return loopinfo


def _stack_signals(signal_dict, loopnames):
    """Assemble each loop's OP/PV/SP series into one (n, 3) ndarray."""
    datadict = {}
    for loop in loopnames:
        df = pd.DataFrame(columns=['op', 'pv', 'sp'])
        df.op = signal_dict[loop + '.OP'].values.flatten()
        df.pv = signal_dict[loop + '.PV'].values.flatten()
        df.sp = signal_dict[loop + '.SP'].values.flatten()
        datadict[loop] = df.values
    return datadict


def _filter_signals(preprocessor, v):
    """Length-wrap and noise-filter one loop array; keep OP/PV columns only."""
    v = preprocessor.lengthwrapping(v)
    v = preprocessor.noisefilter(v)
    return v[:, 0:2]

# Fixed 26-loop hold-out partition: when split_data() runs with
# test_mode='test_26', loops whose names appear here go to the test split
# and every other loop goes to train.
test26loops = ['chemicals_loop2', 'pulpPapers_loop5', 'chemicals_loop12', 'chemicals_loop32', 'chemicals_loop1',
          'chemicals_loop26', 'chemicals_loop11', 'chemicals_loop24', 'chemicals_loop23', 'pulpPapers_loop2',
          'chemicals_loop10', 'chemicals_loop29', 'chemicals_loop5', 'chemicals_loop6', 'mining_loop1',
          'chemicals_loop58', 'pulpPapers_loop9', 'chemicals_loop33', 'chemicals_loop34', 'pulpPapers_loop7',
          'chemicals_loop13', 'chemicals_loop14', 'chemicals_loop16', 'pulpPapers_loop4', 'chemicals_loop3',
          'chemicals_loop4']

def split_data(data, dict_class, test_mode, test_n, preprocessingflag):
    """Split a {loopname: array} dict into train and test sample lists.

    Parameters
    ----------
    data : dict
        Loop name -> raw sequence array.
    dict_class : str
        'stic' labels every sample with array([1]); anything else with array([0]).
    test_mode : str
        'test_26' uses the fixed test26loops partition; 'test_random' draws
        test_n loops with a fixed seed. Any other value raises ValueError
        (the original silently returned two empty lists).
    test_n : int
        Number of test loops for 'test_random'.
    preprocessingflag : str
        Mode passed to tools.DataPreprocessing.

    Returns
    -------
    (train, test) : two lists of (x, label, loopname) tuples.
    """
    dp = tools.DataPreprocessing(mode=preprocessingflag)
    train, test = [], []
    loops = data.keys()
    label = np.array([1]) if dict_class == 'stic' else np.array([0])

    if test_mode == 'test_26':
        for loop in loops:
            xx = dp.preprocess(data[loop])[0]
            if loop in test26loops:
                test.append((xx, label, loop))
            else:
                train.append((xx, label, loop))
    elif test_mode == 'test_random':
        # Local RNG seeded with 0 yields the exact sample sequence the old
        # random.seed(0) produced, without mutating the global RNG state.
        # A set makes the per-loop membership test O(1).
        idx = set(random.Random(0).sample(range(0, len(loops)), test_n))
        for i, loop in enumerate(loops):
            xx = dp.preprocess(data[loop])[0]
            if i in idx:
                test.append((xx, label, loop))
            else:
                train.append((xx, label, loop))
    else:
        raise ValueError('unknown test_mode: {!r}'.format(test_mode))

    return train, test

def loopencoding(stic, nostic):
    """Ordinal-encode the union of stiction and non-stiction loop names.

    Bug fix: sklearn's OrdinalEncoder requires a 2-D input of shape
    (n_samples, n_features); the original passed a flat list of strings,
    which raises ValueError in fit/transform. The names are reshaped into
    a single-feature column here.

    Returns an (n_loops, 1) array of ordinal codes, ordered as
    list(stic) followed by list(nostic).
    """
    loop = list(stic.keys()) + list(nostic.keys())
    names = np.array(loop).reshape(-1, 1)  # one feature column of loop names
    enc = preprocessing.OrdinalEncoder()
    enc.fit(names)
    return enc.transform(names)

class Dataset_isdb(Dataset):
    """Torch Dataset over the preprocessed ISDB stiction .npy files.

    Loads the 'sticdata.npy' / 'nonsticdata.npy' dicts produced by
    isdb_prepare(), partitions them with split_data(), and serves
    (sequence tensor, label tensor, loop name) triples.

    Attributes set for convenience after loading:
      ts_dim      -- feature dimension per time step
      ts_len      -- sequence length
      ts_num      -- number of samples in this split
      num_classes -- 2 (nonstiction=0, stiction=1)
    """

    def __init__(self,
                 root_path,
                 data_path,
                 data_name,
                 flag='TRAIN',
                 config=None
                 ):
        # config must provide test_mode and test_n (see split_data).
        self.root_path = root_path
        self.data_path = data_path
        self.data_name = data_name
        self.flag = flag
        self.config = config

        self.preprocessing = 'standard'  # one of: 'standard', 'maxmin', 'norm'
        self.test_mode = config.test_mode
        self.test_n = config.test_n
        self.xs, self.labels, self.info = self.__read_data()  # (x, label, loop)

        # Shape information useful for model construction.
        self.ts_dim = self.xs.shape[2]   # features per time step
        self.ts_len = self.xs.shape[1]   # sequence length
        self.ts_num = self.xs.shape[0]   # number of series
        self.num_classes = 2
        self.class_names = ('nonstiction', 'stiction')

    def __read_data(self):
        """Load both .npy dicts, split, and assemble the requested partition.

        Returns
        -------
        x : np.ndarray   stacked sequences, shape (N, ts_len, ts_dim)
        target : list    per-sample label arrays
        info : list      per-sample loop names

        Raises
        ------
        ValueError if self.flag is neither 'TRAIN' nor 'TEST' (the original
        fell through and crashed later with a NameError).
        """
        datapath = os.path.join(self.root_path, self.data_path, self.data_name, 'sticdata.npy')
        sticdict = np.load(datapath, allow_pickle=True).item()
        datapath = os.path.join(self.root_path, self.data_path, self.data_name, 'nonsticdata.npy')
        nonsticdict = np.load(datapath, allow_pickle=True).item()

        stictrain, stictest = split_data(
            data=sticdict, dict_class='stic', test_mode=self.test_mode, test_n=self.test_n, preprocessingflag=self.preprocessing)
        nonstictrain, nonstictest = split_data(
            data=nonsticdict, dict_class='nonstic', test_mode=self.test_mode, test_n=self.test_n, preprocessingflag=self.preprocessing)

        if self.flag == 'TRAIN':
            stictrain.extend(nonstictrain)
            D = stictrain
        elif self.flag == 'TEST':
            stictest.extend(nonstictest)
            D = stictest
        else:
            raise ValueError("flag must be 'TRAIN' or 'TEST', got {!r}".format(self.flag))

        # Unzip the (x, label, loop) triples into parallel containers.
        x, target, info = zip(*D)
        x = np.stack(x, axis=0)
        return x, list(target), list(info)

    def __getitem__(self, idx):
        """Return (sequence tensor, label tensor, loop name) for sample *idx*."""
        return torch.from_numpy(self.xs[idx]), torch.from_numpy(self.labels[idx]), self.info[idx]

    def __len__(self):
        """Number of samples in this split."""
        return self.ts_num

if __name__ == '__main__':
    # Smoke test: build the TRAIN split from command-line options and dump
    # every sample's loop name, shape, and label.
    import options

    args = options.Options().parse()
    print('-------------------')
    mydata = Dataset_isdb(
        root_path=args.root_path,
        data_path=args.data_path,
        data_name=args.data_name,
        flag='TRAIN',
        config=args)
    print(len(mydata))
    for sample_idx in range(len(mydata)):
        sample_x, sample_y, loop_name = mydata[sample_idx]
        print(loop_name, sample_x.shape, sample_y)





