#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import math
import time
from prettytable import PrettyTable


# Recursively collect .csv file paths into a list
import os

# Put the paths of all .csv files into the list_csv list
def list_dir(file_dir):
    """Recursively collect the paths of all ``.csv`` files under ``file_dir``.

    :param file_dir: root directory to scan.
    :return: list of full paths to every ``.csv`` file found, including
        those inside nested subdirectories.
    """
    list_csv = []
    for cur_file in os.listdir(file_dir):
        path = os.path.join(file_dir, cur_file)
        if os.path.isfile(path):
            # Record the file only when it has a .csv extension.
            if os.path.splitext(path)[1] == '.csv':
                list_csv.append(path)
        elif os.path.isdir(path):
            # BUG FIX: the original discarded the recursive call's return
            # value, so CSVs in subdirectories were silently lost.
            list_csv.extend(list_dir(path))

    return list_csv


def txt2csv(self, txtpath, txtfilename):
    """Convert ``<txtpath><txtfilename>.txt`` (whitespace-separated numbers)
    into ``<txtpath><txtfilename>.csv`` (no index column).

    NOTE(review): ``self`` is unused — this looks like a method pasted out
    of a class; kept in the signature for backward compatibility.
    ``txtpath`` is concatenated directly, so it must end with a separator.
    """
    source = txtpath + txtfilename + '.txt'
    target = txtpath + txtfilename + '.csv'
    pd.DataFrame(np.loadtxt(source)).to_csv(target, index=False)

def split_list_by_n(list_collection, n):
    """Yield successive chunks of ``n`` elements from ``list_collection``.

    The final chunk may be shorter when the length is not a multiple of n.

    :param list_collection: sequence to split.
    :param n: chunk size (must be positive).
    :return: generator of slices, each an iterable of up to n elements.
    """
    for start in range(0, len(list_collection), n):
        chunk = list_collection[start:start + n]
        yield chunk


def data_string_to_float(number_string):
    """Convert a regex-extracted number string to a float.

    Handles the quirks of the scraped data:
        - 'N/A' / 'NaN' markers are passed through as the string "N/A".
        - '>0' (a regex artifact for zero) becomes 0.
        - 'B' / 'M' / 'K' unit suffixes scale by 1e9 / 1e6 / 1e3.
        - Minus signs are handled naturally by float().

    :param number_string: raw string output of the regex search.
    :return: a float (or "N/A" / 0 for the special cases above).
    """
    if any(marker in number_string for marker in ("N/A", "NaN")):
        return "N/A"
    if number_string == ">0":
        return 0
    # Unit suffix -> multiplier; first match wins, mirroring the original
    # B / M / K precedence.
    for unit, factor in (("B", 1000000000), ("M", 1000000), ("K", 1000)):
        if unit in number_string:
            return float(number_string.replace(unit, "")) * factor
    return float(number_string)


def duplicate_error_check(df):
    """Report rows whose values contain suspicious duplicates.

    A common symptom of failed parsing is consecutive duplicate values.
    This function was used to find the duplicates and tweak the regex;
    any remaining duplicates are probably coincidences.

    :param df: the dataframe to be checked (must contain the columns
        dropped below; a missing column raises KeyError, as before).
    :return: None. Prints each offending row index, the row itself, and
        the duplicated values.
    """
    # Some columns often (correctly) share values with other columns.
    # BUG FIX: drop onto a local copy instead of inplace=True, so this
    # debug helper no longer silently removes columns from the caller's
    # dataframe.
    checked = df.drop(
        [
            "Unix",
            "Price",
            "stock_p_change",
            "SP500",
            "SP500_p_change",
            "Float",
            "200-Day Moving Average",
            "Short Ratio",
            "Operating Margin",
        ],
        axis=1,
    )

    for i in range(len(checked)):
        row = checked.iloc[i]
        # shift() aligns each value with its neighbour, so equality marks
        # consecutive duplicates within the row.
        if pd.Series(row == row.shift()).any():
            values = list(row)
            duplicates = {x for x in values if values.count(x) > 1}
            # A duplicate value of zero is quite common; skip that case.
            if duplicates != {0}:
                print(i, row, duplicates, sep="\n")


def status_calc(stock, sp500, outperformance=10):
    """Classify whether a stock outperformed the S&P500.

    :param stock: stock price (percentage change).
    :param sp500: S&P500 price (percentage change).
    :param outperformance: classified True when the stock beats the
        S&P500 by at least this margin.
    :raises ValueError: if outperformance is negative.
    :return: True/False.
    """
    if outperformance < 0:
        raise ValueError("outperformance must be positive")
    excess_return = stock - sp500
    return excess_return >= outperformance


def convertDBToPkl(collection, filename):
    """Dump every document of a MongoDB collection to ``<filename>.pkl``.

    :param collection: pymongo-style collection exposing ``find()``.
    :param filename: output path without the ``.pkl`` extension.
    """
    print('开始读取信息')
    # NOTE: find() returns every field, including the default '_id';
    # no projection (1 = include, 0 = exclude) is applied here.
    documents = list(collection.find())
    pd.DataFrame(documents).to_pickle(filename + '.pkl')


def printPrettyDataFrame(df, index_name):
    """Pretty-print a DataFrame with PrettyTable, index as the first column.

    :param df: dataframe to display.
    :param index_name: header used for the index column.
    """
    table = PrettyTable()
    table.add_column(index_name, df.index)
    # df.columns.values holds the column labels; add each as a table column.
    for column_name in df.columns.values:
        table.add_column(column_name, df[column_name])
    print(table)


# Fields to include in query results
def convertFieldlistToDict(field: list) -> dict:
    """Convert a list of field names into a MongoDB projection dict.

    Each listed field maps to 1 (include); '_id' maps to 0 (exclude),
    overriding any '_id' entry present in ``field``.

    :param field: field names to include in query results.
    :return: projection dict suitable for ``collection.find(..., projection)``.
    """
    # Idiom fix: dict comprehension instead of indexing range(len(field)).
    projection = {name: 1 for name in field}
    projection['_id'] = 0

    return projection


# Divide a list into n near-equal parts
def divideList(data_list: list, n: int) -> list:
    """Partition ``data_list`` into ``n`` contiguous, near-equal sublists.

    Boundaries are computed with floor(i / n * len), so sizes differ by at
    most one element and the concatenation of the parts is the input.

    :param data_list: list to partition.
    :param n: number of parts.
    :return: list of n sublists.
    """
    total = len(data_list)
    return [
        data_list[math.floor(k / n * total):math.floor((k + 1) / n * total)]
        for k in range(n)
    ]


# Split a pd.Series index into n near-equal groups
# (groups may come out empty in degenerate cases)
def groupPortfolios2(score: pd.Series, n: int) -> list:
    """Split the index of ``score`` into ``n`` contiguous, near-equal groups.

    :param score: series whose index (e.g. stock codes) is partitioned;
        the series is assumed to be pre-sorted by the caller.
    :param n: number of groups.
    :return: list of n index lists, or None (after printing a message)
        when there are fewer entries than groups.
    """
    total = len(score)
    if total < n:
        print('股票代码个数小于分组数，程序中止。')
        return

    codes = list(score.index)
    return [
        codes[math.floor(k / n * total):math.floor((k + 1) / n * total)]
        for k in range(n)
    ]


def groupPortfolios(score: pd.Series) -> tuple:
    """Split the index of ``score`` into five ordered portfolios (quintiles).

    The middle portfolio absorbs the remainder when len(score) is not a
    multiple of 5.
    NOTE(review): when len(score) < 5, n is 0 and the negative slices
    degenerate (the last portfolio receives every index) — confirm callers
    never pass fewer than 5 entries.

    :param score: series whose index is partitioned (assumed pre-sorted).
    :return: tuple of five index lists.
    """
    tickers = list(score.index)
    n = len(score) // 5
    quintiles = (
        tickers[:n],
        tickers[n:2 * n],
        tickers[2 * n:-2 * n],
        tickers[-2 * n:-n],
        tickers[-n:],
    )
    return quintiles

# pandas note: convert a pandas dataframe to multi-index columns

# Compress dataframe memory before pandas computations
def reduce_mem_usage(df):
    """Downcast each numeric column of ``df`` to the smallest dtype that
    holds its value range, to cut memory usage.

    Integer columns shrink to int8/int16/int32/int64; float columns to
    float16/float32/float64. Columns whose min or max is null are skipped.
    NOTE(review): float16 keeps only ~3 significant decimal digits, so the
    downcast can lose precision — confirm this is acceptable for callers.

    :param df: dataframe to compress; mutated in place.
    :return: the same dataframe, for chaining. Also prints a summary line.
    """
    started_at = time.time()
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2

    for column in df.columns:
        dtype = df[column].dtypes
        if dtype not in numeric_dtypes:
            continue
        lo = df[column].min()
        hi = df[column].max()
        # Cannot choose a safe width when the extremes are missing.
        if pd.isnull(lo) or pd.isnull(hi):
            continue
        if str(dtype)[:3] == 'int':
            # First integer width whose (open) range contains the data wins.
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                bounds = np.iinfo(candidate)
                if lo > bounds.min and hi < bounds.max:
                    df[column] = df[column].astype(candidate)
                    break
        else:
            for candidate in (np.float16, np.float32):
                bounds = np.finfo(candidate)
                if lo > bounds.min and hi < bounds.max:
                    df[column] = df[column].astype(candidate)
                    break
            else:
                df[column] = df[column].astype(np.float64)

    mem_after = df.memory_usage().sum() / 1024**2
    print('-- Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction),time spend:{:2.2f} min'.format(mem_after,
                       100*(mem_before-mem_after)/mem_before,
           (time.time()-started_at)/60))
    return df



