# -*- coding: utf-8 -*-
#  @author  Bink
#  @date  2020/12/22 9:18 上午
#  @Email : 2641032316@qq.com

from numpy import *
import pandas as pd
import operator
from os import listdir
from scipy.spatial.distance import cdist

from math import log


def createDataSet():
    """Build the tiny toy dataset for decision-tree demos.

    Returns:
        (dataSet, labels): five samples of two binary features plus a
        'yes'/'no' class label, and the two feature names.
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names


def calcShannonEnt(dataSet: pd.Series) -> float:
    """Compute the Shannon entropy of a column of class labels.

    BUG FIX: the previous version iterated ``dataSet.drop_duplicates()``,
    i.e. only the *unique* labels, so every label counted exactly once and
    the computed entropy was wrong. We must count every occurrence.

    Args:
        dataSet: 1-D sequence of class labels (a pandas Series per the
            usage below, but any iterable of hashable labels works).

    Returns:
        H = -sum(p_i * log2(p_i)) over the label frequencies p_i;
        0.0 for an empty input.
    """
    numEntries = len(dataSet)  # total number of samples
    if numEntries == 0:
        return 0.0  # avoid division by zero; empty set has zero entropy
    labelCounts = {}  # label -> number of occurrences
    for currentLabel in dataSet:  # iterate every row, not just unique values
        labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1
    # Entropy from the observed label frequencies.
    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / numEntries
        shannonEnt -= prob * log(prob, 2)  # log base 2
    return shannonEnt


# reload(trees)
# myDat,labels = createDataSet()
# print(myDat)
# df = pd.DataFrame(myDat)
# # print(df)
# print(df[1].drop_duplicates())
# print(calcShannonEnt(df[1]))


def calSpeed(df_MouseTrack: pd.DataFrame) -> pd.DataFrame:
    """Compute per-sample movement speed from a mouse-track frame.

    BUG FIX: the previous version called ``math.sqrt``, but the ``math``
    module is never imported (only ``from math import log`` at the top of
    the file), so the function raised ``NameError`` on first use. The
    hypotenuse is now computed with ``** 0.5``, and the element-by-element
    Python loop is replaced by equivalent vectorized pandas operations.

    Args:
        df_MouseTrack: DataFrame with numeric columns 'x', 'y' and 't'
            (coordinates and timestamps of consecutive samples —
            assumed; confirm against the caller).

    Returns:
        Single-column DataFrame of speeds sqrt(dx^2 + dy^2) / dt, one row
        per input sample; the first row (0/0 -> NaN) is filled with 0,
        matching the original ``fillna(0)`` behavior.
    """
    # First-order differences; the leading NaN of each diff becomes 0.
    dx = df_MouseTrack['x'].diff().fillna(0)
    dy = df_MouseTrack['y'].diff().fillna(0)
    dt = df_MouseTrack['t'].diff().fillna(0)
    # Euclidean displacement over elapsed time. The first row divides
    # 0 by 0, producing NaN, which is mapped back to 0 below.
    speed = (dx ** 2 + dy ** 2) ** 0.5 / dt
    # to_numpy() drops the source index/name so the result has the same
    # default 0..n-1 index and single column 0 as the original list build.
    return pd.DataFrame(speed.to_numpy()).fillna(0)


def calDiffenceResult(df_in):
    """Return the first-order difference of *df_in* as a DataFrame.

    Args:
        df_in: Series or DataFrame of numeric samples.

    Returns:
        DataFrame of row-to-row differences, with the leading NaN
        (no predecessor for the first row) replaced by 0.
    """
    framed = pd.DataFrame(df_in)
    return framed.diff().fillna(0)

