class DataFrameRedis:
    """Store and load pandas DataFrames in Redis.

    Frames whose in-memory footprint exceeds ``_CHUNK_MB`` are pickled and
    stored as several row-wise chunks under ``key_name:0``, ``key_name:1``,
    ...; smaller frames are stored whole under ``key_name``.
    """

    # Size threshold (MB) above which a DataFrame is split into chunks.
    _CHUNK_MB = 512

    def __init__(self, redis_host: str = None, port: str = None, password: str = None):
        """
        :param redis_host:  Redis server host / IP address
        :param port:        Redis server port
        :param password:    Redis password (None or empty string for no auth)
        """
        self.redis_host = redis_host
        self.password = password
        self.port = port

    def _connect(self):
        """Build a Redis connection from the stored credentials.

        ``StrictRedis`` accepts ``password=None``, so a single call covers
        both the authenticated and unauthenticated cases (the original code
        duplicated the connection logic in both public methods).
        """
        return redis.StrictRedis(host=self.redis_host, port=self.port,
                                 password=self.password or None)

    def _delete_existing(self, rs, key_name: str) -> None:
        """Remove the whole-frame key and any contiguous chunk keys.

        Without this cleanup, re-storing a smaller dataset leaves stale
        chunks behind that a later read could mix into the result.
        """
        rs.delete(key_name)
        i = 0
        while rs.delete(f'{key_name}:{i}'):  # delete() returns 0 when absent
            i += 1

    def df_to_redis(self, df: pd.DataFrame, key_name: str) -> None:
        """Pickle *df* and store it in Redis under *key_name*.

        :param df:        DataFrame to store
        :param key_name:  Redis key (chunked frames use ``key_name:<i>``)
        """
        rs = self._connect()
        self._delete_existing(rs, key_name)

        # Approximate in-memory size in MB (+1 so tiny frames round up).
        storage_mb = (df.memory_usage().sum() / (1024 ** 2)) + 1

        if storage_mb <= self._CHUNK_MB:
            rs.set(key_name, pickle.dumps(df))
            return

        # Split into enough chunks that each stays well under the threshold.
        n_chunks = int(storage_mb // self._CHUNK_MB) + 2
        rows_per_chunk = -(-df.shape[0] // n_chunks)  # ceiling division
        for i in range(n_chunks):
            chunk = df.iloc[i * rows_per_chunk:(i + 1) * rows_per_chunk]
            rs.set(f'{key_name}:{i}', pickle.dumps(chunk))

    def get_redis_df(self, key_name: str) -> pd.DataFrame:
        """Load the DataFrame stored under *key_name*.

        Reads the whole-frame key first; if absent, reads the numbered
        chunk keys ``key_name:0``, ``key_name:1``, ... in order so row
        order is preserved.  (The previous implementation scanned every
        key in the database with ``rs.keys()`` — which returns keys in
        arbitrary order — and matched by *substring*, so unrelated keys
        merely containing ``key_name`` were concatenated in as well.)

        :param key_name:  Redis key used when the frame was stored
        :return: the reassembled DataFrame (empty if the key is absent)
        """
        rs = self._connect()

        whole = rs.get(key_name)
        if whole is not None:
            # SECURITY NOTE: pickle.loads must only be run on trusted data;
            # anyone who can write to this Redis can execute code here.
            return pickle.loads(whole)

        chunks = []
        i = 0
        while True:
            raw = rs.get(f'{key_name}:{i}')
            if raw is None:
                break
            chunks.append(pickle.loads(raw))
            i += 1

        if not chunks:
            return pd.DataFrame()
        # Concatenating only real chunks (no seed empty frame) avoids the
        # dtype upcasting that concat-with-empty can cause.
        return pd.concat(chunks, ignore_index=True)
    
# --- Manual test driver --------------------------------------------------
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before real use.
redis_host = '192.168.0.199'
port = '6400'
password = 'Jf_datacenter8'

if __name__ == '__main__':
    # Guarded so importing this module no longer triggers a live Redis read.
    client = DataFrameRedis(redis_host, port, password)
    start_time = time.time()
    df_caiwu_data = client.get_redis_df('df_caiwu_data')
    # df_caiwu_station = client.get_redis_df('df_caiwu_station')
    print(time.time() - start_time)  # elapsed load time in seconds