import hashlib
import os
import tarfile
import zipfile

import pandas as pd
import requests


class FileDownloader:
    """Cache-aware file downloader keyed by a registry of datasets.

    ``DATA_HUB`` maps a dataset name to a ``(url, sha1_hash)`` tuple.
    Files are cached locally and re-downloaded only when the cached
    copy's SHA-1 digest does not match the registered hash.
    """

    # Registry of name -> (url, sha1 hex digest); populated by callers.
    DATA_HUB = dict()
    DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'

    @classmethod
    def download(cls, name, cache_dir=os.path.join('..', 'data')):
        """Download the file registered under *name* into *cache_dir*.

        Returns the local file path. A cached copy is reused when its
        SHA-1 matches the registered hash. Returns ``None`` when the
        HTTP request fails.
        """
        assert name in cls.DATA_HUB, f"{name} 不存在于 {cls.DATA_HUB}."
        url, sha1_hash = cls.DATA_HUB[name]
        os.makedirs(cache_dir, exist_ok=True)
        fname = os.path.join(cache_dir, url.split('/')[-1])

        # Reuse the cached file only when its SHA-1 matches the registry.
        if os.path.exists(fname):
            sha1 = hashlib.sha1()
            with open(fname, 'rb') as f:
                while True:
                    data = f.read(1048576)  # hash in 1 MiB chunks
                    if not data:
                        break
                    sha1.update(data)
            if sha1.hexdigest() == sha1_hash:
                print('文件已存在！')
                return fname

        print(f'正在从{url}中下载{fname}...')
        try:
            # timeout guards against hanging forever on an unresponsive server
            r = requests.get(url, stream=True, verify=True, timeout=30)
            r.raise_for_status()
            with open(fname, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
        except requests.RequestException as e:
            print(f"下载失败: {e}")
            return None

        return fname

    @classmethod
    def download_extract(cls, name, folder=None):
        """Download and unpack a ``.zip``/``.tar``/``.gz`` archive.

        Returns the extracted directory (or ``base_dir/folder`` when
        *folder* is given); ``None`` when the download itself failed.

        Raises:
            ValueError: if the file extension is not a supported archive.
        """
        fname = cls.download(name)
        if fname is None:
            return None
        base_dir = os.path.dirname(fname)
        data_dir, ext = os.path.splitext(fname)
        # Context managers close the archive handle even if extraction
        # raises; the previous plain fp.close() leaked it on error.
        if ext == '.zip':
            with zipfile.ZipFile(fname, 'r') as fp:
                fp.extractall(base_dir)
        elif ext in ('.tar', '.gz'):
            with tarfile.open(fname, 'r') as fp:
                fp.extractall(base_dir)
        else:
            # raise instead of `assert False`: asserts are stripped under -O,
            # which previously left `fp` unbound and crashed confusingly.
            raise ValueError('只可以解压zip/tar哦！')
        return os.path.join(base_dir, folder) if folder else data_dir

    @classmethod
    def download_all(cls):
        """Download every dataset currently registered in ``DATA_HUB``."""
        for name in cls.DATA_HUB:
            cls.download(name)


# Example: download the Kaggle house-price data and inspect its shape.
if __name__ == '__main__':
    # 1 - Register the datasets: each entry maps name -> (url, sha1 checksum).
    DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
    FileDownloader.DATA_HUB = {
        'kaggle_house_train': (DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce'),
        'kaggle_house_test': (DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90'),
    }

    # 2 - Download both splits. `download` returns None on network failure,
    # which previously crashed `pd.read_csv` with an unrelated error.
    train_path = FileDownloader.download('kaggle_house_train')
    test_path = FileDownloader.download('kaggle_house_test')
    if train_path is None or test_path is None:
        raise SystemExit('download failed; cannot load the CSV files')

    train_data = pd.read_csv(train_path)
    test_data = pd.read_csv(test_path)

    # 3 - Inspect the loaded frames.
    print('----------train_data.shape：', train_data.shape)
    print('----------test_data.shape：', test_data.shape)
