#!/usr/bin/python
# -*-coding:utf-8-*-
'''Some Support Functions for Different Utilities'''
import os
import _pickle as cPickle
import pandas as pd
import logging
import json
import numpy as np
from datetime import datetime

### Low-level data-reading dependency (not provided here)
from zbc_factor_lib.base.factors_library_base import NewRQFactorLib as DataReader

def writeToFile(file, store_path, mode='wb'):
    '''Pickle *file* (any picklable object) to *store_path*.'''
    with open(store_path, mode) as out_handle:
        cPickle.dump(file, out_handle)

def loadFile(file_path, mode='rb'):
    '''Unpickle and return the object stored at *file_path*.

    WARNING: only load files from trusted sources — unpickling
    untrusted data can execute arbitrary code.
    '''
    with open(file_path, mode) as in_handle:
        return cPickle.load(in_handle)

def loadLogFile(file_path):
    '''Read a log file, skipping blank lines.

    For each remaining line, keep only the text after the last
    'INFO ' marker (lines without the marker are kept whole).
    '''
    with open(file_path, 'r') as log_handle:
        return [line.strip('\n').split('INFO ')[-1]
                for line in log_handle
                if line != '\n']

def writeJson(file, file_path):
    '''Serialize *file* as JSON to *file_path* (non-ASCII written as-is).'''
    with open(file_path, 'w') as out_handle:
        json.dump(file, out_handle, ensure_ascii=False)

def loadJson(file_path):
    '''Parse and return the JSON document stored at *file_path*.'''
    with open(file_path, 'r') as in_handle:
        return json.load(in_handle)


def memory_status():
    '''Parse /proc/meminfo and return a dict of memory figures in GiB.

    Linux only. Adds a derived 'MemUsed' entry:
    MemTotal - MemFree - Buffers - Cached.
    '''
    mem = {}
    with open('/proc/meminfo') as f:
        for line in f:
            # skip degenerate lines (e.g. a bare newline)
            if len(line) < 2:
                continue
            name, _, remainder = line.partition(':')
            # first token after the colon is the value in kB; convert to GiB
            mem[name] = float(remainder.split()[0]) / (1024. ** 2)
    mem['MemUsed'] = mem['MemTotal'] - mem['MemFree'] - mem['Buffers'] - mem['Cached']
    return mem

def setlogging(filename):
    '''Configure the root logger to append INFO-level records to *filename*.'''
    logging.basicConfig(
        filename=filename,
        filemode='a',
        level=logging.INFO,
        format='%(asctime)s:%(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )

def setLogger(name, filename):
    '''Create (or fetch) a named logger writing INFO records to *filename*.

    Fix: the original attached a brand-new FileHandler on *every* call, so
    calling setLogger twice with the same name duplicated every log line.
    A handler is now attached only on first configuration.

    Args:
        name: logger name passed to logging.getLogger
        filename: path of the log file (only honored on the first call
            for a given name — later calls reuse the existing handler)

    Return: the configured logging.Logger
    '''
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s: %(process)d %(levelname)s %(message)s')
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger

def one_hot(vector, num_classes=None):
    '''
    One-hot encode an integer vector (or a single int).

    Args:
        vector: 1-D np.ndarray of non-negative class indices, or a single int.
        num_classes: total number of classes. For array input it defaults to
            max(vector) + 1; for a scalar input it is required.

    Return: np.int64 array of shape (len(vector), num_classes), or
        (1, num_classes) for a scalar input.
    '''
    if isinstance(vector, np.ndarray):
        assert vector.shape[0] > 0

        if num_classes is None:
            num_classes = np.max(vector) + 1
        else:
            assert num_classes > 0
            # Fix: valid indices are 0..num_classes-1, so num_classes must be
            # strictly greater than the largest index. The original
            # `num_classes >= np.max(vector)` let num_classes == max(vector)
            # through and then crashed with an IndexError below.
            assert num_classes > np.max(vector)

        result = np.zeros(shape=(vector.shape[0], num_classes))
        result[np.arange(vector.shape[0]), vector] = 1
    else:
        # scalar input: encode a single class index
        result = np.zeros(shape=(1, num_classes))
        result[0, vector] = 1
    return result.astype(np.int64)

def chunck_slice(IterObj, n):
    '''
    Deal an iterable round-robin into n chunks.

    e.g.: [0, 1, 2, 3, 4, 5, 6, 7], n=2 => [[0, 2, 4, 6], [1, 3, 5, 7]]

    Args:
        IterObj: any iterable, e.g. a list
        n: number of chunks

    Return: a list of n lists
    '''
    chunks = [[] for _ in range(n)]
    for idx, item in enumerate(IterObj):
        chunks[idx % n].append(item)
    return chunks

def repeat_one_dim_array(one_dim_arr, n, axis=0, copy=True):
    '''Stack a 1-D array n times.

    Args:
        one_dim_arr: 1-D numpy array of length m
        n: repetition count
        axis: 0 -> result shape (n, m); 1 -> result shape (m, n)
        copy: copy the input first before tiling

    Return: 2-D numpy array

    Raises:
        ValueError: for any axis other than 0 or 1
    '''
    one_dim_arr = one_dim_arr.copy() if copy else one_dim_arr
    if axis == 0:
        return np.tile(one_dim_arr, (n, 1))
    elif axis == 1:
        return np.tile(one_dim_arr, (n, 1)).transpose()
    else:
        # Fix: `assert 'input axis error'` was a no-op (a non-empty string
        # is truthy), so a bad axis silently returned None.
        raise ValueError('input axis error')

def repeat_two_dim_array(two_dim_arr, n, axis=0, copy=True):
    '''Tile a 2-D array (h, w) n times along a new axis.

    Args:
        two_dim_arr: 2-D numpy array, shape (h, w)
        n: repetition count
        axis: 0 -> shape (n, h, w); 1 -> shape (h, n, w); 2 -> shape (h, w, n)
        copy: copy the input first before tiling

    Return: 3-D numpy array

    Raises:
        ValueError: for any axis other than 0, 1 or 2
    '''
    two_dim_arr = two_dim_arr.copy() if copy else two_dim_arr
    if axis == 0:
        return np.tile(two_dim_arr, (n, 1)).reshape(n, two_dim_arr.shape[0], two_dim_arr.shape[1])
    elif axis == 1:
        return np.tile(two_dim_arr, (n, 1)).reshape(two_dim_arr.shape[0], n, two_dim_arr.shape[1])
    elif axis == 2:
        return np.swapaxes(np.tile(two_dim_arr, (n, 1)).reshape(two_dim_arr.shape[0], n, two_dim_arr.shape[1]), axis1=1, axis2=2)
    else:
        # Fix: `assert 'input axis error'` was a no-op (truthy string),
        # so a bad axis silently returned None.
        raise ValueError('input axis error')

def unixtime_converter(unix_timestamp, format='%Y-%m-%d %H:%M:%S'):
    '''Convert a unix timestamp (int or numeric string) to a local-time string.'''
    seconds = int(unix_timestamp)
    return datetime.fromtimestamp(seconds).strftime(format)


# def cum_rank(x):
#     cum_ranking_values = []
#     for i in range(x.shape[0]):
#         if i == 0:
#             cum_ranking_values.append(1)
#         else:
#             temp = rankdata(x[:i+1])
#             temp /= float(temp.max())
#             cum_ranking_values.append(temp[-1])
#     return np.array(cum_ranking_values)

def data_1d_to_3d(np_arr, h, w, c, mode=1):
    '''
    Reshape flat samples into CNN-style 3-D samples.

    One sample is a 3-D array (height, width, channels), short (h, w, c).
    Input is 1-D per sample: shape (n, h*w*c) for n samples.

    Args:
        np_arr: numpy array, shape like (n, h*w*c)
        h: height
        w: width
        c: channels
        mode: int, 1 to 6 — the memory layout of each flat sample
            (innermost-to-outermost order of the flattened data):
            1. h - w - c
            2. w - c - h
            3. c - w - h
            4. c - h - w
            5. w - h - c
            6. h - c - w

    Return: reshaped array, shape (n, h, w, c)

    Raises:
        ValueError: if mode is not in 1..6
    '''
    np_arr = np_arr.copy()
    if mode == 1:
        ret_arr = np.swapaxes(np_arr.reshape(-1, c, w, h, order='C'), axis1=1, axis2=3)
    elif mode == 2:
        ret_arr = np.swapaxes(np_arr.reshape(-1, h, c, w, order='C'), axis1=2, axis2=3)
    elif mode == 3:
        # already laid out h - w - c: a plain reshape suffices
        ret_arr = np_arr.reshape(-1, h, w, c, order='C')
    elif mode == 4:
        ret_arr = np.swapaxes(np_arr.reshape(-1, w, h, c, order='C'), axis1=1, axis2=2)
    elif mode == 5:
        ret_arr = np.swapaxes(np.swapaxes(np_arr.reshape(-1, c, h, w, order='C'), axis1=1, axis2=2), axis1=2, axis2=3)
    elif mode == 6:
        ret_arr = np.swapaxes(np.swapaxes(np_arr.reshape(-1, w, c, h, order='C'), axis1=2, axis2=3), axis1=1, axis2=2)
    else:
        # Fix: the original `assert 'No such Mode'` was a no-op (non-empty
        # string is truthy) and fell through to an UnboundLocalError.
        raise ValueError('No such Mode: {}'.format(mode))
    return ret_arr

def data_1d_to_2d(np_arr, r, c, mode=1, copy=False):
    '''
    Reshape flat samples into 2-D samples, e.g. for LSTM input:
    one sample of shape (1, r*c) becomes (1, r, c).

    Args:
        np_arr: numpy array, shape like (n, r*c)
        r: rows
        c: columns
        mode: 1 -> each flat sample is laid out column-major w.r.t. (r, c);
              2 -> plain row-major reshape
        copy: bool, whether to copy the input numpy array first

    Return: reshaped array, shape (n, r, c)

    Raises:
        ValueError: if mode is not 1 or 2
    '''
    # Fix: the `copy` flag was ignored — the input was always copied,
    # contradicting the documented parameter and the sibling repeat_* helpers.
    np_arr = np_arr.copy() if copy else np_arr
    if mode == 1:
        ret_arr = np.swapaxes(np_arr.reshape(-1, c, r, order='C'), axis1=1, axis2=2)
    elif mode == 2:
        ret_arr = np_arr.reshape(-1, r, c, order='C')
    else:
        # Fix: `assert 'No such Mode'` was a no-op; bad modes fell through
        # to an UnboundLocalError on ret_arr.
        raise ValueError('No such Mode: {}'.format(mode))
    return ret_arr

def maps_normalization(x, first_w=None, num_std = 3):
    '''
    Normalize a batch of 3-D maps, shape (N, h, w, c), channel by channel.

    Each channel of each sample is standardized toward N(0, 1) using the
    per-sample, per-channel mean/std over the (h, w) plane, after clipping
    values to mean +/- num_std standard deviations. NaNs in the result
    (e.g. from zero-variance channels, 0/0) are replaced with 0.

    Args:
        x: numpy array, shape (N, h, w, c)
        first_w: if given and <= x.shape[2], statistics are computed on the
            first `first_w` width-columns only, and the columns beyond
            first_w are appended back un-normalized.
        num_std: clipping bound in standard deviations.
            NOTE(review): the original docstring mentioned a 2.58 bound
            ("<=> 3 Standard Deviations") which does not match the
            num_std=3 default — confirm the intended value.

    Return: normalized array, same shape as x.
    '''

    if first_w is None or first_w > x.shape[2]:
        h = x.shape[1]
        w = x.shape[2]
        c = x.shape[3]
        # Per-sample, per-channel mean over the (h, w) plane, broadcast back
        # to (N, h, w, c) by tiling the (N, c) means h*w times and reshaping
        # with data_1d_to_3d mode=3 (plain h-w-c layout).
        mean_x_c = data_1d_to_3d(np.concatenate([np.mean(np.mean(x, axis=1), axis=1)] * h * w, axis=1), h=h, w=w, c=c, mode=3)
        # E[x^2], broadcast the same way; std = sqrt(E[x^2] - E[x]^2)
        mean_x2_c = data_1d_to_3d(np.concatenate([np.mean(np.mean(x ** 2, axis=1), axis=1)] * h * w, axis=1), h=h, w=w, c=c, mode=3)
        std_c = (mean_x2_c - mean_x_c ** 2) ** 0.5
        del mean_x2_c
        # Clip to mean +/- num_std * std, then standardize.
        x_return = np.maximum(np.minimum(x, mean_x_c + num_std * std_c), mean_x_c - num_std * std_c)
        x_return = (x_return - mean_x_c) / std_c
        # Free the large broadcast intermediates before the final pass.
        del std_c, mean_x_c
        x_return[np.isnan(x_return)] = 0
        return x_return
    else:
        # Same procedure, but statistics come only from the first `first_w`
        # width-columns; the remaining columns are passed through untouched.
        h = x.shape[1]
        w = first_w
        c = x.shape[3]
        mean_x_c = data_1d_to_3d(np.concatenate([np.mean(np.mean(x[:, :, :first_w, :], axis=1), axis=1)] * h * w, axis=1), h=h, w=w, c=c, mode=3)
        mean_x2_c = data_1d_to_3d(np.concatenate([np.mean(np.mean(x[:, :, :first_w, :] ** 2, axis=1), axis=1)] * h * w, axis=1), h=h, w=w, c=c, mode=3)
        std_c = (mean_x2_c - mean_x_c ** 2) ** 0.5
        del mean_x2_c
        x_return = np.maximum(np.minimum(x[:, :, :first_w, :], mean_x_c + num_std * std_c), mean_x_c - num_std * std_c)
        x_return = (x_return - mean_x_c) / std_c
        del std_c, mean_x_c
        # Re-attach the un-normalized tail columns along the width axis.
        x_return = np.concatenate((x_return, x[:, :, first_w:, :]), axis=2)
        x_return[np.isnan(x_return)] = 0
        return x_return

# def get_stock_all_market_index(id=1):
#     if id == 0:
#         return stock_market_benchmark['6']
#     elif id == 1:
#         return stock_market_benchmark['002']
#     elif id == 2:
#         return stock_market_benchmark['00']
#     elif id == 3:
#         return stock_market_benchmark['3']
#     else:
#         return None

def stock_code_formatting(stock_code, type=1):
    '''Convert a Chinese A-share stock code between suffix conventions.

    Codes starting with '6' are Shanghai (.SH / .XSHG); everything else
    is treated as Shenzhen (.SZ / .XSHE).

    Args:
        stock_code: bare code ('600000') for type 1/2, or a suffixed code
            ('600000.XSHG' / '600000.SH') for type 3/other.
        type: 1 -> append exchange suffix (.SH/.SZ)
              2 -> append RQ suffix (.XSHG/.XSHE)
              3 -> convert suffix from RQ to exchange style
              other -> convert suffix from exchange to RQ style

    Return: the reformatted stock code string.
    '''
    is_shanghai = stock_code[0] == '6'
    if type == 1:
        # append exchange suffix
        return stock_code + ('.SH' if is_shanghai else '.SZ')
    if type == 2:
        # append RQ suffix
        return stock_code + ('.XSHG' if is_shanghai else '.XSHE')
    bare_code = stock_code.split('.')[0]
    if type == 3:
        # convert suffix: RQ -> exchange
        return bare_code + ('.SH' if is_shanghai else '.SZ')
    # convert suffix: exchange -> RQ
    return bare_code + ('.XSHG' if is_shanghai else '.XSHE')

# TODO - benchmark market index for each stock board
# Maps a stock-code prefix to its board's benchmark index code:
#   '6'   -> '000001'  (Shanghai codes)
#   '3'   -> '399102'  (codes starting with 3)
#   '002' -> '399101'  (002-prefixed Shenzhen codes)
#   '00'  -> '399106'  (other 00x Shenzhen codes)
# NOTE(review): index-code semantics inferred from usage in
# get_stock_corresponding_market_index — confirm against the data source.
stock_market_benchmark = {'6': '000001',
                          '3': '399102',
                          '002': '399101',
                          '00': '399106'}

def get_stock_corresponding_market_index(stock_code):
    '''Return the benchmark index code for a stock's board.

    Dispatches on the code prefix via the module-level
    stock_market_benchmark mapping; returns None for any
    prefix that matches no known board.
    '''
    first_char = stock_code[0]
    prefix = stock_code[:3]
    if first_char == '6':
        return stock_market_benchmark['6']
    if first_char == '3':
        return stock_market_benchmark['3']
    if prefix == '002':
        return stock_market_benchmark['002']
    if prefix in ('001', '000'):
        return stock_market_benchmark['00']


def get_previous_trade_date(end_date, n=1):
    '''Return the n-th trading date at or before *end_date* as 'YYYY-MM-DD'.

    NOTE(review): assumes the trade-date table is already sorted ascending
    so that index -n is the n-th most recent eligible date — confirm with
    the data source.
    '''
    reader = DataReader()
    raw = reader.read_basic_data_table('processed_trade_date_data')
    all_dates = pd.DatetimeIndex(raw['trade_date'].unique())
    eligible = all_dates[all_dates <= end_date]
    return eligible[-n].strftime('%Y-%m-%d')

def get_next_trade_date(end_date, n=1):
    '''Return the n-th trading date at or after *end_date* as 'YYYY-MM-DD'.

    NOTE(review): assumes the trade-date table is already sorted ascending
    so that index n-1 is the n-th eligible date — confirm with the data
    source.
    '''
    reader = DataReader()
    raw = reader.read_basic_data_table('processed_trade_date_data')
    all_dates = pd.DatetimeIndex(raw['trade_date'].unique())
    eligible = all_dates[all_dates >= end_date]
    return eligible[n - 1].strftime('%Y-%m-%d')
