import configparser
import operator
import pickle
from turtle import shape
from math import log

import joblib
import pandas as pd
from mpmath import zeros
from numpy import tile
from sklearn.cluster import KMeans
from sklearn.datasets._base import load_csv_data, _convert_data_dataframe, DATA_MODULE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.utils import Bunch


'''
input:
    1. 可投资的资金
    2. 投资的时间(长期/中期/短期)
    3. 风险承受能力(高/中/低)
    4. 投资期望(高/中/低)

return:
    1. 预计收益率
    2. 稳健资产，风险资产的投资比例
'''
# 数据集，用于构造决策树，包含了数据集的各种操作
# Dataset helpers used to build the decision tree: Shannon entropy,
# subset splitting, best-feature selection, and majority vote.
class dataSet:

    @staticmethod
    def cal_shannon_ent(data_set):
        """Return the Shannon entropy of a dataset.

        :param data_set: list of feature vectors; the LAST element of each
                         vector is the class label
        :return: entropy in bits (0.0 for an empty or single-class dataset)
        """
        num_entries = len(data_set)
        # Count how often each label value occurs.
        label_counts = {}
        for feat_vec in data_set:
            current_label = feat_vec[-1]  # last column is the label
            label_counts[current_label] = label_counts.get(current_label, 0) + 1

        # H = -sum(p * log2(p)) over all label probabilities.
        shannon_ent = 0.0
        for count in label_counts.values():
            prob = count / num_entries
            shannon_ent -= prob * log(prob, 2)
        return shannon_ent

    @staticmethod
    def split_data_set(data_set, axis, value):
        """Return the vectors whose feature at index `axis` equals `value`,
        with that feature column removed.

        :param data_set: list of feature vectors to partition
        :param axis: index of the feature to filter on
        :param value: required value of that feature
        :return: new list of reduced feature vectors
        """
        ret_data_set = []
        for feat_vec in data_set:
            if feat_vec[axis] == value:
                # Drop column `axis` so it cannot be reused in deeper splits.
                ret_data_set.append(feat_vec[:axis] + feat_vec[axis + 1:])
        return ret_data_set

    @staticmethod
    def chooseBestFeatureToSplit(data_set):
        """Return the index of the feature with the highest information gain,
        or -1 if no split improves on the base entropy.

        :param data_set: list of feature vectors (label in the last column)
        :return: best feature index, or -1
        """
        num_features = len(data_set[0]) - 1  # last column is the label
        base_entropy = dataSet.cal_shannon_ent(data_set)
        best_info_gain = 0.0
        best_feature = -1

        for i in range(num_features):
            unique_vals = {example[i] for example in data_set}

            # Weighted entropy of the partition induced by feature i.
            new_entropy = 0.0
            for value in unique_vals:
                sub_data_set = dataSet.split_data_set(data_set, i, value)
                prob = len(sub_data_set) / float(len(data_set))
                new_entropy += prob * dataSet.cal_shannon_ent(sub_data_set)

            info_gain = base_entropy - new_entropy
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature = i

        return best_feature

    @staticmethod
    def majority_cnt(class_list):
        """Return the most frequent class label (first-seen label wins ties).

        :param class_list: list of class labels
        :return: the most common label
        """
        class_count = {}
        for vote in class_list:
            class_count[vote] = class_count.get(vote, 0) + 1
        # max() returns the first key reaching the highest count, which matches
        # the stable descending sort used by the original implementation.
        return max(class_count, key=class_count.get)


'''
    通过配置文件加载数据
    已经通过测试
'''


def load_data(name):
    """Look up a data-source path from ../config.ini.

    :param name: option name inside the [data_source] section
    :return: the configured value, or '' when the section/option is missing
    """
    data_source = ''
    config = configparser.ConfigParser()

    # read() silently yields an empty config when the file does not exist.
    config.read('../config.ini')

    try:
        data_source = config.get('data_source', name)
    except configparser.Error:
        # Narrowed from a bare except: only swallow config-lookup failures
        # (NoSectionError / NoOptionError), not unrelated bugs.
        print('读取配置文件失败')
    return data_source

# 数据预处理
def clean():
    """Load the training CSV, select features, drop NaNs, PCA-reduce to 2 dims.

    Data source: the 'model_train_data' entry resolved via load_data().
    Side effect: persists the fitted PCA model to ../model/pca_model.pkl so
    prediction can later apply the identical projection.

    :return: ndarray of shape (n_samples, 2) — the PCA-transformed features
    """
    data = pd.read_csv(load_data("model_train_data"))
    # Feature subset used for the user profile.
    data = data[["Age", "Experience", "Income", "Family", "Education"]]
    data = data.dropna()

    # Reduce to two components (removed two `data = data` no-op statements).
    pca = PCA(n_components=2)
    data = pca.fit_transform(data)
    joblib.dump(pca, '../model/pca_model.pkl')
    return data


# 训练用户画像模型
# 训练用户画像模型
def train():
    """Cluster the cleaned feature matrix into two user-profile groups and
    print each group's rows.

    clean() returns an ndarray (PCA output), so group extraction must use
    NumPy fancy indexing — the original `data.loc[...]` only exists on
    DataFrames and raised AttributeError here.
    """
    data = clean()  # ndarray of shape (n_samples, 2)

    # Standardize so both PCA components carry equal weight in the distance.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(data)

    k = 2  # number of clusters
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X_scaled)

    cluster_labels = kmeans.labels_

    # Split the samples by their assigned cluster.
    group_1_indices = [i for i, label in enumerate(cluster_labels) if label == 0]
    group_2_indices = [i for i, label in enumerate(cluster_labels) if label == 1]

    # Fix: integer-array indexing on the ndarray instead of DataFrame .loc.
    group_1_data = data[group_1_indices]
    group_2_data = data[group_2_indices]

    print("Group 1 data:")
    print(group_1_data)

    print("\nGroup 2 data:")
    print(group_2_data)


def train_ndarray():
    """Train a 2-cluster KMeans user-profile model and persist it.

    Loads the cleaned (PCA-reduced) feature matrix, standardizes it, fits
    KMeans with k=2, pickles the fitted model to finalized_model.sav, and
    prints the per-sample cluster labels.
    """
    data = clean()

    # Standardize features before clustering.
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(data)

    k = 2  # number of clusters
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(data_scaled)

    cluster_labels = kmeans.labels_

    # Persist the fitted model. Use a context manager so the file handle is
    # closed even if pickling fails (the original leaked the open handle).
    filename = 'finalized_model.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(kmeans, model_file)

    print(cluster_labels)


def predict():
    """Placeholder — not implemented yet (body is this docstring only).

    Intended contract (from the original note): return the holding ratio
    (stable vs. aggressive assets), drawdown ratio, expected return, and
    risk profile for a user.
    """


# maybe unnecessary
def normalize(data_set):
    '''
    :param data_set: 数据集
    :method: 转化为z分数
    :return: 归一化后的数据集
    '''
    # read csv
    data_set = pd.read_csv(data_set)
    # 丢弃id列
    data_set = data_set.drop(['id'], axis=1)
    # 丢弃空值
    data_set = data_set.dropna()

    # will pd.to_numeric() work?
    data_set=pd.to_numeric(data_set,errors='coerce')
    data_set=data_set
    print(data_set)
    # 将rate列转化为1,2,3
    # data_set['cost']=data_set['cost'].map({'low': 1, 'medium': 2, 'high': 3})
    # data_set['rate'] = data_set['rate'].map({'low': 1, 'medium': 2, 'high': 3})
    # data_set['risk']=data_set['risk'].map({'low': 1, 'medium': 2, 'high': 3})
    print(data_set)

    return

if __name__ == "__main__":
    normalize("F:/study/Grade2.2/Fise-code/data/data_fake.csv")
    # train_ndarray()

