# -*- coding: utf-8 -*-
# @Time    : 2021/10/23 20:11
# @Author  : hanyunxi
# @description   : COVID-19 病例和死亡人数将每天更新。原始数据由德国罗伯特科赫研究所收集，
# 可以通过国家地理数据平台下载   最早记录的病例是2020-01-24。

# matplotlib 是python2D 绘图领域使用最广泛的库
import matplotlib.pyplot as plt
import pandas as pd
# Numpy Python 语言的一个扩展程序库，支持大量的维度数组与矩阵运算，此外也针对数组运算提供大量的数学函数库。
import numpy as np
# seaborn 是一个基于 matplotlib 的 Python 数据可视化库。
import seaborn as sns
# 解决matplotlib 中文乱码
from matplotlib import font_manager
#statsmodels 是一个 Python 模块，它提供了用于估计许多不同统计模型以及进行统计测试和统计数据探索的类和函数。
from scipy.stats.morestats import Mean
from statsmodels.compat import scipy
from statsmodels.stats.diagnostic import lilliefors
from statsmodels.graphics.gofplots import qqplot
#  引入多项式函数
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Scipy在 Numpy的基础上增加了众多的数学计算、科学计算以及工程计算中常用的模块, 例如线性代数、常微分方程数值求解、信号处理、图像处理、稀疏矩阵等。
from scipy.stats import norm, kurtosis, kurtosistest, pearsonr, skew, normaltest, chisquare, chi2_contingency, chi2,anderson,kstest
from scipy.stats import norm
import datetime

# 读取原始数据
def read_csv():
    """Load the raw German COVID-19 dataset from disk as a DataFrame."""
    return pd.read_csv("../file/covid_de.csv")

# 数据清理
def data_clean():
    """Load the raw data, parse dates, sort chronologically and drop NaNs.

    Returns a DataFrame sorted ascending by 'date' with a fresh 0-based
    index (so positional and label indexing agree downstream).
    """
    raw = read_csv()
    # Parse the 'date' column into datetime objects (ISO format YYYY-MM-DD).
    raw['date'] = pd.to_datetime(raw['date'].astype(str), format="%Y-%m-%d")
    # Chronological order, then discard incomplete rows and renumber.
    cleaned = raw.sort_values(by=['date'], ascending=True).dropna()
    # describe(include=[np.number]) would summarise central tendency,
    # dispersion and shape of the numeric columns, excluding NaN values.
    return cleaned.reset_index(drop=True)


def get_degree(a, b):
    """Select the best polynomial degree for regressing b on a.

    Splits (a, b) into a 70/30 train/test partition, fits polynomial
    regressions of degree 1..99, and returns the degree with the lowest
    test-set RMSE.  Also plots RMSE against degree (log-scaled y axis).

    Parameters
    ----------
    a : array-like of shape (n_samples, n_features) -- predictor values.
    b : array-like -- target values.

    Returns
    -------
    int
        The degree with the minimum test-set RMSE.
    """
    x_train, x_test, y_train, y_test = train_test_split(a, b, test_size=0.3)

    rmses = []
    degrees = np.arange(1, 100)
    min_rmse, min_deg = 1e10, 0

    for deg in degrees:
        # Expand the training features to the candidate polynomial degree.
        poly_features = PolynomialFeatures(degree=deg, include_bias=False)
        x_poly_train = poly_features.fit_transform(x_train)

        # Fit a linear model on the expanded features.
        poly_reg = LinearRegression()
        poly_reg.fit(x_poly_train, y_train)

        # BUG FIX: use transform (not fit_transform) on the test set so the
        # test features go through exactly the transformer fitted on the
        # training data.
        x_poly_test = poly_features.transform(x_test)
        poly_predict = poly_reg.predict(x_poly_test)
        poly_rmse = np.sqrt(mean_squared_error(y_test, poly_predict))
        rmses.append(poly_rmse)

        # Keep the best (lowest RMSE) degree seen so far.
        if poly_rmse < min_rmse:
            min_rmse = poly_rmse
            min_deg = deg

    print('Best DOF {} with RMSE {}'.format(min_deg, min_rmse))

    # Diagnostic plot: RMSE per candidate degree.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(degrees, rmses)
    ax.set_yscale('log')
    ax.set_xlabel('Degree')
    ax.set_ylabel('RMSE')
    return min_deg

def _daily_sums(frame, dates, column):
    """Sum `column` of `frame` per day, one entry for each date in `dates`.

    `frame` must have a 0-based RangeIndex so the positional indices
    returned by np.where line up with label-based Series indexing.
    """
    totals = []
    for day in dates:
        rows = np.where(frame['date'] == day)[0]
        totals.append(np.sum(frame[column][rows]))
    return totals


def analyze_covid_19_data():
    """Print summary statistics: totals, per-day averages and mortality.

    BUG FIX: the daily cases/deaths/recoveries loops previously discarded
    the np.where(...) result and reused a stale filter left over from the
    per-gender loops, so every "per day" average was computed from the
    wrong rows.  The (never-printed) per-gender daily aggregations were
    dead code and have been removed.
    """
    df_new = data_clean()

    print("\n")
    # state / county / age_group / gender -- categorical summary.
    print(df_new.describe(include=['object']))
    print("cases total：{0}".format(df_new['cases'].sum()))
    print("deaths total:%d" % df_new['deaths'].sum())
    print("Recoveries total: {0}".format(df_new['recovered'].sum()))
    # Currently infected = cases - (deaths + recovered).
    dr_all = df_new['deaths'].sum() + df_new['recovered'].sum()
    print("Currently infected total: %d" % (df_new['cases'].sum() - dr_all))

    # Gender-specific sub-datasets (used for the mortality figures below),
    # re-indexed so positional lookups stay consistent.
    df_male = df_new[df_new['gender'] == 'M'].reset_index(drop=True)
    df_female = df_new[df_new['gender'] == 'F'].reset_index(drop=True)

    # Daily national totals over the unique dates of the main dataset.
    unique_df = df_new['date'].unique()
    dates_case = _daily_sums(df_new, unique_df, 'cases')
    dates_deaths = _daily_sums(df_new, unique_df, 'deaths')
    dates_recovered = _daily_sums(df_new, unique_df, 'recovered')

    print("Analytics involving {} days".format(np.array(dates_case).size))
    print("first day  : {}".format(unique_df[0]))
    print("last day  : {}".format(unique_df[unique_df.size - 1]))
    print("")
    # Average daily totals across Germany.
    print("On average {} cases per day in Germany".format(np.mean(dates_case)))
    print("On average {} deaths per day in Germany".format(np.mean(dates_deaths)))
    print("On average {} recoveries per day in Germany".format(np.mean(dates_recovered)))

    # Mortality = deaths / cases, expressed as a percentage.
    print("Mortality overall: %f" % (df_new['deaths'].sum() / df_new['cases'].sum() * 100))
    print("Mortality females: %f" % (df_female['deaths'].sum() / df_female['cases'].sum() * 100))
    print("Mortality males: %f" % (df_male['deaths'].sum() / df_male['cases'].sum() * 100))

def drawing_show():
    """Visualise the distribution of reported cases/recoveries and the
    per-state mean of daily reported cases compared to the grand mean."""
    frame = data_clean()

    # 1. Histogram of raw reported case counts.
    plt.figure(figsize=(20, 20))
    sns.histplot(frame['cases'], kde=False)
    plt.show()

    # Distribution of reported recoveries (distplot is deprecated upstream,
    # kept for behavioural parity).
    plt.figure(figsize=(20, 20))
    sns.distplot(frame['recovered'], kde=False)

    # Mean of daily reported cases per state.
    states = frame['state'].unique()
    per_state_mean = np.array([
        np.average(frame['cases'][np.where(frame['state'] == s)[0]])
        for s in states
    ])

    plt.figure(figsize=(25, 10))
    # Bars sorted ascending by mean; state names reordered to match.
    heights = np.sort(per_state_mean)
    ordered_states = states[np.argsort(per_state_mean)]
    positions = np.arange(len(ordered_states))
    grand_mean = np.average(per_state_mean)
    # Dashed horizontal reference line at the grand mean, annotated with its value.
    plt.axhline(grand_mean, color='k', linestyle="dashed", linewidth=1)
    plt.text(10, grand_mean, grand_mean, fontsize=20, va='center', ha='center', backgroundcolor='w')
    plt.bar(positions, heights)
    plt.title('Mean of daily reported cases per state')
    plt.xlabel('State')
    plt.ylabel('Mean of cases per day')
    plt.ylim(0, 20)
    # State names on the x axis, rotated 45 degrees.
    plt.xticks(positions, ordered_states, rotation=45)
    plt.show()

# 报告病例的性别比例
def gender_rate_case():
    """Pie chart of the share of reported cases per gender.

    BUG FIX: labels used to come from value_counts() (ordered by row
    frequency) while the values were always appended as [female, male];
    the two orders could disagree and mislabel the slices.  Labels and
    values are now derived together from one grouped sum.
    """
    df_new = data_clean()
    # Total cases per gender; index i of `labels` matches index i of `values`.
    cases_by_gender = df_new.groupby('gender')['cases'].sum()
    # 'F' first to keep the historical slice order.
    labels = [g for g in ['F', 'M'] if g in cases_by_gender.index]
    values = [cases_by_gender[g] for g in labels]
    # Offset every wedge slightly from the centre.
    explode = tuple(0.1 for _ in labels)
    # matplotlib lacks CJK glyphs by default; load a font file manually.
    fontpath = "../font/simsun.ttc"
    font = font_manager.FontProperties(fname=fontpath)
    plt.title("报告病例的性别比例", fontproperties=font)
    # Wedge styling: black outline, 2px line width.
    weight_dict = {
        "edgecolor": "black",
        "linewidth": 2
    }
    plt.pie(values, labels=labels, explode=explode, autopct="%1.1f%%",
            wedgeprops=weight_dict, shadow=True, startangle=90)
    plt.show()

#报告死亡的性别比例
def gender_death_rate():
    """Pie chart of the share of reported deaths per gender."""
    frame = data_clean()
    deaths_per_gender = frame.groupby(by="gender")['deaths'].sum()
    # Labels and values walk the grouped index together, so they stay aligned.
    labels = list(deaths_per_gender.index)
    values = [deaths_per_gender[g] for g in labels]
    # Title uses a CJK font loaded from disk (matplotlib's default lacks it).
    font = font_manager.FontProperties(fname='../font/simsun.ttc')
    plt.title("报告死亡的性别比例", fontproperties=font)
    # Offset of each wedge from the centre.
    explode = (0.1, 0.1)
    # Wedge styling passed through to the Wedge objects: outline colour/width.
    wedgeprops = {
        "edgecolor": "black",
        "linewidth": 2,
    }
    # autopct formats the percentage label drawn inside each wedge.
    plt.pie(values, labels=labels, explode=explode, autopct="%1.2f%%",
            shadow=True, wedgeprops=wedgeprops, startangle=90)
    plt.show()
# 报告恢复率的性别比例
def gender_recovered_rate():
    """Pie chart of the share of reported recoveries per gender."""
    frame = data_clean()
    recovered_per_gender = frame.groupby(by="gender")['recovered'].sum()
    # Labels and values walk the grouped index together, so they stay aligned.
    labels = list(recovered_per_gender.index)
    values = [recovered_per_gender[g] for g in labels]
    # Title uses a CJK font loaded from disk (matplotlib's default lacks it).
    font = font_manager.FontProperties(fname='../font/simsun.ttc')
    plt.title("报告恢复率的性别比例", fontproperties=font)
    # Offset of each wedge from the centre.
    explode = (0.1, 0.1)
    # Wedge styling passed through to the Wedge objects: outline colour/width.
    wedgeprops = {
        "edgecolor": "black",
        "linewidth": 2,
    }
    # autopct formats the percentage label drawn inside each wedge.
    plt.pie(values, labels=labels, explode=explode, autopct="%1.2f%%",
            shadow=True, wedgeprops=wedgeprops, startangle=90)
    plt.show()

#不同色调的时域的年龄范围的case分析
def analyze_case_by_age_group():
    """Line plot of reported cases over time, one hue per age group."""
    frame = data_clean()
    # One figure with a single axes.
    fig, ax = plt.subplots(figsize=(20, 15))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band when aggregating ("sd" would draw the
    # standard deviation instead; None disables bootstrapping).
    # hue_order fixes the legend/colour order of the age-group categories.
    sns.lineplot(ax=ax, data=frame, x="date", y="cases", hue="age_group",
                 ci=95,
                 hue_order=['80-99', '60-79', '35-59', '15-34', '05-14', '00-04'])
    font = font_manager.FontProperties(fname="../font/simsun.ttc")
    plt.title("不同色调的时域的年龄范围的case分析", fontproperties=font)
    # Rotate the date labels 45 degrees for readability.
    plt.xticks(rotation=45)
    plt.show()

# 与性别有关的案件
def gender_cases():
    """Line plot of reported cases over time, split by gender."""
    frame = data_clean()
    # One figure with a single axes.
    fig, ax = plt.subplots(figsize=(16, 9))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band when aggregating ("sd" would draw the
    # standard deviation instead; None disables bootstrapping).
    sns.lineplot(ax=ax, data=frame, x="date", y="cases", hue="gender", ci=95)
    plt.title("Cases in relation to gender ")
    # Rotate the date labels for readability.
    plt.xticks(rotation=45)
    plt.show()

# 与年龄组相关的死亡人数
def death_agegroup_relation():
    """Line plot of reported deaths over time, one hue per age group."""
    frame = data_clean()
    fig, ax = plt.subplots(figsize=(16, 9))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band ("sd" would draw the standard deviation).
    sns.lineplot(ax=ax, data=frame, x="date", y="deaths", hue="age_group", ci=95,
                 hue_order=['80-99', '60-79', '35-59', '15-34', '05-14', '00-04'])
    plt.title("Deaths in relation to age group")
    plt.xticks(rotation=45)
    plt.show()

#与性别相关的死亡人数
def death_gender_relation():
    """Line plot of reported deaths over time, split by gender."""
    frame = data_clean()
    # One figure with a single axes.
    fig, ax = plt.subplots(figsize=(16, 9))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band ("sd" would draw the standard deviation).
    sns.lineplot(ax=ax, data=frame, x="date", y="deaths", hue="gender", ci=95)
    plt.title("Deaths in relation to gender")
    plt.xticks(rotation=45)
    plt.show()


#与年龄组有关的恢复数
def recoveries_age_group_relation():
    """Line plot of reported recoveries over time, one hue per age group."""
    frame = data_clean()
    # One figure with a single axes.
    fig, ax = plt.subplots(figsize=(16, 9))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band ("sd" would draw the standard deviation).
    sns.lineplot(ax=ax, data=frame, x="date", y="recovered", ci=95, hue="age_group",
                 hue_order=['80-99', '60-79', '35-59', '15-34', '05-14', '00-04'])
    plt.title("Recoveries in relation to age group")
    plt.xticks(rotation=45)
    plt.show()

#与性别有关的恢复数
def recoveries_gender_Relation():
    """Line plot of reported recoveries over time, split by gender."""
    frame = data_clean()
    # One figure with a single axes.
    fig, ax = plt.subplots(figsize=(16, 9))
    sns.set_style("darkgrid")
    # ci=95: 95% confidence band ("sd" would draw the standard deviation).
    sns.lineplot(ax=ax, data=frame, x="date", y="recovered", ci=95, hue="gender")
    plt.title("Recoveries in relation to gender")
    plt.xticks(rotation=45)
    plt.show()

# Check data distributions of non aggregated main dataset
# 检查非聚合主数据集的数据分布
def case_distributions_demo(field):
    """Q-Q plot of df[field] (as a Series) against a normal distribution.

    line options: None, '45', 's' (standardised line), 'r' (fitted
    regression line), 'q' (line through the quartiles).
    """
    frame = data_clean()
    qqplot(frame[field], line='s')
    plt.title(f"{field} distributions")
    plt.show()

def case_distributions_array(field):
    """Q-Q plot of df[field] converted to a numpy array, standardised line."""
    frame = data_clean()
    qqplot(np.array(frame[field]), line='s')
    plt.title(f"{field} array data distributions")
    plt.show()


# 绘制主要数据集汇总报告病例的分布
def case_distributions_histogram():
    """Histogram of the daily aggregated recoveries of the main dataset.

    CLEANUP: the original also summed daily cases and deaths into lists
    that were never used; that dead work has been removed.  Only the
    recoveries series is plotted (titles[2]).
    """
    df = data_clean()
    titles = ['cases', 'deaths', 'recovered']
    # Aggregate recoveries per unique day; np.where returns positional
    # indices, which match labels because data_clean() resets the index.
    data_recoveries = []
    for day in df['date'].unique():
        rows = np.where(df['date'] == day)[0]
        data_recoveries.append(np.sum(df['recovered'][rows]))
    plt.figure(figsize=(16, 9))
    sns.histplot(data=data_recoveries, kde=False)
    plt.title(f"Plot distribution of aggregated reported {titles[2]} for main dataset")
    plt.show()

# 从汇总的报告病例数据中绘制感染波
def case_waves_report():
    """Plot the infection waves from daily aggregated reported cases
    (female subset) and print the approximate start date of each wave.

    BUG FIX: the per-gender loops previously summed over the FULL
    dataframe (df) instead of the gender subsets, so the "female" and
    "male" series were really all-gender daily totals.  Each subset is
    now filtered and re-indexed before aggregation.
    """
    df = data_clean()
    unique = df['date'].unique()
    # Gender subsets, re-indexed so positional lookups from np.where work.
    df_female = df[df['gender'] == 'F'].reset_index(drop=True)
    df_male = df[df['gender'] == 'M'].reset_index(drop=True)
    unique_df_female = df_female['date'].unique()
    unique_df_male = df_male['date'].unique()

    # Daily totals: all genders, female only, male only.
    data_case = []
    for x in unique:
        rows = np.where(df['date'] == x)[0]
        data_case.append(np.sum(df['cases'][rows]))

    data_female = []
    for x in unique_df_female:
        rows = np.where(df_female['date'] == x)[0]
        data_female.append(np.sum(df_female['cases'][rows]))

    data_male = []
    for x in unique_df_male:
        rows = np.where(df_male['date'] == x)[0]
        data_male.append(np.sum(df_male['cases'][rows]))

    # Day indices as column vectors, one per series.
    day_index = np.arange(0, unique.shape[0], 1).reshape(-1, 1)
    day_female_index = np.arange(0, unique_df_female.shape[0], 1).reshape(-1, 1)
    day_male_index = np.arange(0, unique_df_male.shape[0], 1).reshape(-1, 1)

    # Dynamic selection: '0' = all cases, '1' = female, '2' = male.
    all_index = {'0': day_index, '1': day_female_index, '2': day_male_index}
    all_df = {'0': data_case, '1': data_female, '2': data_male}
    titles = {"0": "all_case", "1": "female_case", "2": "male_case"}

    plt.rcParams.update(plt.rcParamsDefault)
    plt.clf()
    fig, ax = plt.subplots(figsize=(12, 9))
    sns.lineplot(ax=ax, x=np.arange(0, all_index['1'].size, 1), y=all_df['1'])

    # Hand-placed markers for the three probable waves (day offsets 30/230/380).
    plt.vlines(x=30, ymin=all_df['1'][30], ymax=40000, label="Prob. wave 1", color='k', linestyles="dashed", linewidth=2)
    plt.text(10, 40000, "Prob. wave 1", fontsize=20, va="center", ha="center", backgroundcolor="w")

    plt.vlines(x=230, ymin=all_df['1'][230], ymax=40000, label="Prob. wave 1", color='k', linestyles="dashed", linewidth=2)
    plt.text(210, 40000, "Prob. wave 2", fontsize=20, va="center", ha="center", backgroundcolor="w")

    plt.vlines(x=380, ymin=all_df['1'][380], ymax=40000, label="Prob. wave 1", color='k', linestyles="dashed", linewidth=2)
    plt.text(360, 40000, "Prob. wave 3", fontsize=20, va="center", ha="center", backgroundcolor="w")
    plt.title(f"Plot infection waves from aggregated reported {titles['1']} data")
    plt.show()

    # Wave starts read off the plot by eye; refine with better detection later.
    print('Start wave 1 ca. {}'.format(unique[30]))
    print('Start wave 2 ca. {}'.format(unique[230]))
    print('Start wave 3 ca. {}'.format(unique[380]))

# 使用多项式回归对波浪分布进行高级分析
def analyze_waves_distribution_by_polynomial():
    """Fit a polynomial regression to daily aggregated reported cases and
    plot the fitted curve over the observations.

    The polynomial degree is chosen by get_degree() (lowest test RMSE).

    CLEANUP/BUG FIX: the original also built per-gender series that were
    (a) never used and (b) incorrectly summed over the full dataframe
    instead of the gender subsets; that dead, buggy code is removed.
    Only the all-gender daily totals feed the regression.
    """
    df = data_clean()
    unique = df['date'].unique()

    # Daily total of reported cases, one entry per unique date.
    data_case = []
    for x in unique:
        rows = np.where(df['date'] == x)[0]
        data_case.append(np.sum(df['cases'][rows]))

    # Day index as a column vector: the regression predictor.
    day_index = np.arange(0, unique.shape[0], 1).reshape(-1, 1)

    # Expand the day index to the best degree found by simulation, then fit
    # an ordinary least-squares linear model on the expanded features.
    poly_reg = PolynomialFeatures(degree=get_degree(day_index, data_case))
    X_poly = poly_reg.fit_transform(day_index)
    pol_reg = LinearRegression()
    pol_reg.fit(X_poly, data_case)

    fig, ax = plt.subplots(figsize=(16, 9))
    # Observations in red, fitted polynomial curve in blue.
    plt.scatter(day_index, data_case, color='red')
    plt.plot(day_index, pol_reg.predict(poly_reg.fit_transform(day_index)), color='blue')
    plt.title('Polynomial Reg')
    plt.xlabel('Time index')
    plt.ylabel('Reported cases')
    plt.show()

# 使用多项式回归对波浪分布进行高级分析 recoveries data
def analyze_recoveries_waves_distribution_by_polynomial():
    """Polynomial-regression analysis of one daily-aggregated series.

    NOTE(review): despite the function name, every use below selects key
    "2", which is the *deaths* series (ylabels["2"] == "deaths") -- confirm
    whether key "1" (recoveries) was intended.
    """
    df = data_clean()
    unique = df['date'].unique()
    date_unique = df['date'].unique()

    day_index = np.arange(0, unique.shape[0], 1)
    # Reshape into a column vector (same data, new shape) for the regression.
    day_index = day_index.reshape(-1, 1)

    # Daily national totals for each of the three series.
    data_cases = []
    data_recoveries = []
    data_deaths = []
    for x in date_unique:
        filter = np.where(df['date'] == x)[0]
        data_cases.append((np.sum(df['cases'][filter])))
        data_recoveries.append((np.sum(df['recovered'][filter])))
        data_deaths.append((np.sum(df['deaths'][filter])))

    # Dynamic selection: "0" = cases, "1" = recoveries, "2" = deaths.
    all_data = {"0":data_cases,"1":data_recoveries,"2":data_deaths}
    ylabels = {"0":"cases","1":"recoveries","2":"deaths"}
    # PolynomialFeatures turns the input matrix into a new matrix of the
    # given degree; the degree comes from get_degree() (lowest test RMSE).
    poly_reg = PolynomialFeatures(degree=get_degree(day_index,all_data["2"]))
    x_poly = poly_reg.fit_transform(day_index)
    # Ordinary least squares: fits coefficients w = (w_1, ..., w_p)
    # minimising the residual sum of squares between observed and predicted.
    pol_reg = LinearRegression()
    # fit() stores the linear model's coefficients in coef_.
    pol_reg.fit(x_poly,all_data["2"])

    fig,ax = plt.subplots(figsize=(16,9))
    # Scatter of the observed series (varying marker size/colour possible).
    plt.scatter(day_index,all_data["2"],color="red")
    # predict() applies the linear model; fit_transform re-expands the index.
    plt.plot(day_index,pol_reg.predict(poly_reg.fit_transform(day_index)),color="blue")
    plt.title("Polynomial Reg")
    plt.xlabel("Time index")
    plt.ylabel(f"Reported {ylabels['2']}")
    plt.show()


# 数据集的非图形分析
def non_graphical_analytics():
    """Numeric (non-graphical) shape analysis of the cleaned dataset:
    kurtosis (Fisher and Pearson), sample skew, and Anderson-Darling
    tests against several candidate distributions."""
    df = data_clean()
    # Fisher definition: a normal distribution has kurtosis 0.0.
    print("Kurtosis cases: {}".format(kurtosis(df['cases'], fisher=True)))
    print("Kurtosis deaths: {}".format(kurtosis(df['deaths'], fisher=True)))
    print("Kurtosis recoveries: {}".format(kurtosis(df['recovered'], fisher=True)))
    print("")
    # Pearson definition: a normal distribution has kurtosis 3.0.
    print("Kurtosis cases: {}".format(kurtosis(df['cases'], fisher=False)))
    print("Kurtosis deaths: {}".format(kurtosis(df['deaths'], fisher=False)))
    print("Kurtosis recoveries: {}".format(kurtosis(df['recovered'], fisher=False)))
    print("")
    # Sample skewness (the data is right-skewed).
    print("Skew Cases: %.3f" % skew(df['cases']))
    print("Skew deaths: %.3f" % skew(df['deaths']))
    print("Skew recoveries: %.3f" % skew(df['recovered']))
    print("")
    # Anderson-Darling goodness-of-fit, H0: "data come from <dist>".
    # (display label, scipy dist name) pairs -- the label text, including
    # its original spelling, is part of the printed output.
    candidates = (('normal', 'norm'), ('gumbel', 'gumbel'),
                  ('exponential', 'expon'), ('logisic', 'logistic'))
    for column in ('cases', 'deaths', 'recovered'):
        for label, dist in candidates:
            print('Test whether {} data is {} disrtibuted: {}'.format(
                column, label, anderson(df[column], dist=dist)))
        # Blank separator line between columns (none after the last one).
        if column != 'recovered':
            print("")

#   假设 H0 的 Kolmogorov-Smirnov 检验：数据来自正态分布总体
def normal_distributed_population_count():
    """Test H0 "data come from a normal population" three ways, then
    report correlations.

    1. Kolmogorov-Smirnov against N(sample mean, sample std).
    2. D'Agostino omnibus test (normaltest) at alpha = 0.05.
    3. Lilliefors test (table p-values).
    4. Pearson correlations between the three series and df.corr().

    FIXES: the three duplicated normaltest blocks are collapsed into one
    loop (printed output unchanged), and df.corr() gets
    numeric_only=True -- older pandas silently dropped the non-numeric
    columns, newer pandas raises without it.
    """
    df = data_clean()
    # Parameters of the reference normal: sample mean and the population
    # standard deviation std = sqrt(mean((x - x.mean())**2)).
    case_avg = np.mean(df['cases'])
    death_avg = np.mean(df['deaths'])
    recovered_avg = np.mean(df['recovered'])
    cases_std = np.std(df['cases'])
    deaths_std = np.std(df['deaths'])
    recovered_std = np.std(df['recovered'])
    # One-sample Kolmogorov-Smirnov goodness-of-fit against the fitted normal.
    k_cases = kstest(df['cases'], 'norm', args=(case_avg, cases_std))
    k_deaths = kstest(df['deaths'], 'norm', args=(death_avg, deaths_std))
    k_recovered = kstest(df['recovered'], 'norm', args=(recovered_avg, recovered_std))

    print('K-Test cases: {}'.format(k_cases))
    print('K-Test deaths: {}'.format(k_deaths))
    print('K-Test recovered: {}'.format(k_recovered))
    print("-----------------------------------------------------------------------")
    # D'Agostino omnibus test for each series.  The (label, column, blank)
    # tuples reproduce the original output byte-for-byte, including the
    # extra blank line printed for deaths and recovered but not for cases.
    alpha = 0.05
    for label, column, blank in (('Cases', 'cases', False),
                                 ('deaths', 'deaths', True),
                                 ('recovered', 'recovered', True)):
        stat, p = normaltest(df[column])
        print('Statistics=%.3f, p=%.3f' % (stat, p))
        if blank:
            print("")
        if p > alpha:
            print('{} data is normal distributed (Can not reject H0)'.format(label))
        else:
            print('{} data is not normal distributed (Reject H0)'.format(label))
    print("----------------------------------------------------------------")
    # Lilliefors test (normality with estimated parameters) as a final check.
    print('Cases: {}'.format(lilliefors(df['cases'], pvalmethod='table')))
    print('Deaths: {}'.format(lilliefors(df['deaths'], pvalmethod='table')))
    print('Recoveries: {}'.format(lilliefors(df['recovered'], pvalmethod='table')))

    # The p-values here are always < alpha = 0.05, so H0 is rejected.
    # Pearson r measures linear association in [-1, +1]; 0 = no correlation.
    print("Pearson correlation cases/recoveries: {}".format(pearsonr(df['cases'], df['recovered'])[0]))
    print("Pearson correlation cases/deaths: {}".format(pearsonr(df['cases'], df['deaths'])[0]))
    print("Pearson correlation deaths/recovered: {}".format(pearsonr(df['deaths'], df['recovered'])[0]))
    # numeric_only=True keeps the old behaviour of ignoring object columns.
    print(df.corr(numeric_only=True))
    # Non-aggregated data shows little correlation except cases/recoveries.

# 使用聚合数据集 分析数据
def aggregated_data():
    """Analyse the aggregated dataset: per-day national totals of
    cases/deaths/recoveries, their kurtosis, skew and correlations.

    BUG FIX: the 'Skew Recovered' and 'Skew Deaths' lines previously
    printed each other's values (labels and data were swapped).
    """
    # Per-day totals of cases/deaths/recoveries across all of Germany.
    df = data_clean()
    date_unique = df['date'].unique()

    data_cases = []
    data_recoveries = []
    data_deaths = []
    for x in date_unique:
        rows = np.where(df['date'] == x)[0]
        data_cases.append(np.sum(df['cases'][rows]))
        data_recoveries.append(np.sum(df['recovered'][rows]))
        data_deaths.append(np.sum(df['deaths'][rows]))

    # Fisher kurtosis (normal => 0.0).
    print('Kurtosis cases aggreg. data: {}'.format(kurtosis(data_cases, fisher=True)))
    print('Kurtosis deaths aggreg. data: {}'.format(kurtosis(data_deaths, fisher=True)))
    print('Kurtosis recoveries aggreg. data: {}'.format(kurtosis(data_recoveries, fisher=True)))

    # Pearson kurtosis (normal => 3.0).
    print('Kurtosis cases aggreg. data: {}'.format(kurtosis(data_cases, fisher=False)))
    print('Kurtosis deaths aggreg. data: {}'.format(kurtosis(data_deaths, fisher=False)))
    print('Kurtosis recoveries aggreg. data: {}'.format(kurtosis(data_recoveries, fisher=False)))
    # Cases/recoveries look mostly platykurtic; deaths looks leptokurtic
    # (hyperbolic/Laplace-like).

    # Sample skewness, labels now matched to their data.
    print('Skew Cases: %.3f' % skew(data_cases))
    print('Skew Recovered: %.3f' % skew(data_recoveries))
    print('Skew Deaths: %.3f' % skew(data_deaths))

    # Pearson r: positive => y rises with x; negative => y falls as x rises.
    print("Pearson correlation cases/recoveries: {}".format(pearsonr(data_cases, data_recoveries)[0]))
    print("Pearson correlation cases/deaths: {}".format(pearsonr(data_cases, data_deaths)[0]))
    print("Pearson correlation deaths/recoveries: {}".format(pearsonr(data_deaths, data_recoveries)[0]))












if __name__ == "__main__":
    # Entry point: each analysis below is standalone -- uncomment the one
    # you want to run.  Only aggregated_data() runs by default.
    # analyze_covid_19_data()
    # drawing_show()
    # gender_rate_case()
    # gender_death_rate()
    # gender_recovered_rate()
    # analyze_case_by_age_group()
    # gender_cases()
    # death_agegroup_relation()
    # death_gender_relation()
    # recoveries_age_group_relation()
    # recoveries_gender_Relation()

    # Data-distribution Q-Q plots: pick one field, then one plot function.
    # field = "cases"
    # field = "deaths"
    # field = "recovered"
    # case_distributions_demo(field)
    # case_distributions_array(field)

    # case_distributions_histogram()

    # case_waves_report()

    # analyze_waves_distribution_by_polynomial()
    # analyze_recoveries_waves_distribution_by_polynomial()

    # non_graphical_analytics()

    # normal_distributed_population_count()
    aggregated_data()