import requests
from pathlib import Path
import pickle
from shutil import unpack_archive

# Download locations for each raw time-series dataset.
urls = {
    'ecg': [
        'http://www.cs.ucr.edu/~eamonn/discords/ECG_data.zip',
        'http://www.cs.ucr.edu/~eamonn/discords/chfdbchf15.txt',
    ],
    'space_shuttle': ['http://www.cs.ucr.edu/~eamonn/discords/TEK16.txt'],
    'respiration': ['http://www.cs.ucr.edu/~eamonn/discords/nprs44.txt'],
}

# Row-index ranges that are anomalous, per file.  A row gets label 1.0 when
# lo < i < hi for any span (both bounds exclusive, matching the original checks).
ANOMALY_SPANS = {
    'chfdb_chf01_275.txt': [(2330, 2500)],
    'chfdb_chf13_45590.txt': [(2830, 2870)],
    'chfdbchf15.txt': [(2275, 2315)],
    'nprs44.txt': [(16192, 16638), (20457, 23457)],
    'TEK16.txt': [(4270, 4420)],
}

# Per-file (train, test) row slices used to split the labeled data.
SPLIT_SLICES = {
    'chfdb_chf01_275.txt': (slice(None, 1833), slice(1833, 3674)),
    'chfdb_chf13_45590.txt': (slice(None, 2439), slice(2439, 3726)),
    'chfdbchf15.txt': (slice(3381, 14244), slice(33, 3381)),
    'nprs44.txt': (slice(363, 12955), slice(12955, 24082)),
    'TEK16.txt': (slice(521, 3588), slice(3588, 4539)),
}


def _download(url, raw_dir):
    """Download *url* into *raw_dir*; unpack zips in place.

    Returns the final Path of the downloaded file (after any rename).
    """
    filename = raw_dir.joinpath(Path(url).name)
    print('Downloading', url)
    resp = requests.get(url)
    # Fail loudly on HTTP errors instead of silently saving an error page.
    resp.raise_for_status()
    filename.write_bytes(resp.content)
    if filename.suffix == '':
        # Rebind to the renamed path so the suffix checks below see '.txt'
        # (the original code kept the stale, pre-rename path here).
        filename = filename.rename(filename.with_suffix('.txt'))
    print('Saving to', filename)
    if filename.suffix == '.zip':
        print('Extracting to', raw_dir)
        unpack_archive(str(filename), extract_dir=str(raw_dir))
    return filename


def _label_rows(filepath, dataset_name):
    """Parse *filepath* into rows of floats with a trailing 0.0/1.0 anomaly label.

    Rows of files with no entry in ANOMALY_SPANS are kept unlabeled, as in the
    original script.
    """
    spans = ANOMALY_SPANS.get(filepath.name)
    labeled_data = []
    with open(str(filepath)) as f:
        for i, line in enumerate(f):
            tokens = [float(token) for token in line.split()]
            if dataset_name == 'ecg':
                # Drop the first column for the ecg dataset.
                tokens.pop(0)
            if spans is not None:
                # Label 1.0 inside any anomalous span, else 0.0.
                tokens.append(1.0 if any(lo < i < hi for lo, hi in spans) else 0.0)
            labeled_data.append(tokens)
    return labeled_data


def _dump_pickle(path, data):
    """Serialize *data* to *path* as a pickle file."""
    with open(str(path), 'wb') as pkl:
        pickle.dump(data, pkl)


def _save_labeled(labeled_data, filepath, dataset_dir):
    """Write the whole labeled series plus its train/test splits as pickles."""
    pkl_name = filepath.with_suffix('.pkl').name

    whole_dir = dataset_dir.joinpath('labeled', 'whole')
    whole_dir.mkdir(parents=True, exist_ok=True)
    _dump_pickle(whole_dir.joinpath(pkl_name), labeled_data)

    train_dir = dataset_dir.joinpath('labeled', 'train')
    train_dir.mkdir(parents=True, exist_ok=True)
    test_dir = dataset_dir.joinpath('labeled', 'test')
    test_dir.mkdir(parents=True, exist_ok=True)

    split = SPLIT_SLICES.get(filepath.name)
    if split is None:
        # No predefined train/test split for this file.
        return
    train_slice, test_slice = split
    _dump_pickle(train_dir.joinpath(pkl_name), labeled_data[train_slice])
    _dump_pickle(test_dir.joinpath(pkl_name), labeled_data[test_slice])


for datasetName in urls:
    # Download (and unpack) every raw file of this dataset.
    dataset_dir = Path('dataset', datasetName)
    raw_dir = dataset_dir.joinpath('raw')
    raw_dir.mkdir(parents=True, exist_ok=True)
    for url in urls[datasetName]:
        _download(url, raw_dir)

    # Label every .txt file (downloaded or extracted from a zip) and pickle it.
    for filepath in raw_dir.glob('*.txt'):
        labeled_data = _label_rows(filepath, datasetName)
        _save_labeled(labeled_data, filepath, dataset_dir)
