import datetime
import os

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.axis import Axis
from osgeo import gdal, osr
from tqdm import tqdm
import seaborn as sns
plt.rcParams['xtick.direction'] = 'in'  # draw x-axis tick marks pointing inward
plt.rcParams['ytick.direction'] = 'in'  # draw y-axis tick marks pointing inward
# Global font family (Microsoft YaHei supports the CJK station labels/titles)
plt.rcParams["font.family"] = "Microsoft YaHei"
# Global tick-label font size (currently disabled)
# plt.rcParams['xtick.labelsize'] = 10
# plt.rcParams['ytick.labelsize'] = 10
# # Global axis-label font size (currently disabled)
# plt.rcParams["axes.labelsize"] = 20
# # Global axis-label font weight (currently disabled)
# plt.rcParams["axes.labelweight"] = "light"

# Global font settings end here

def get_OZNET_data(stm_dir, out_csv=r"G:\test\SMAP_NEW\OZNET_data_2014.csv"):
    """Aggregate OzNet surface-layer (0.00-0.05 m) .stm files into one CSV.

    Walks every station sub-directory of *stm_dir*, reads each .stm file whose
    path contains both '0.00' and '0.05' (the surface-layer depth markers),
    averages the soil-moisture column per day and writes one row per
    (station, date) to *out_csv* with columns station/date/lon/lat/mean.

    :param stm_dir: directory containing one sub-directory per station
    :param out_csv: output CSV path (default kept for backward compatibility)
    """
    # Collect plain dicts and build the frame once at the end:
    # DataFrame.append was removed in pandas 2.0 and rebuilt the frame per row.
    records = []
    for station in os.listdir(stm_dir):
        station_path = os.path.join(stm_dir, station)
        if not os.path.isdir(station_path):
            continue
        for fname in os.listdir(station_path):
            if not fname.endswith('.stm'):
                continue
            file = os.path.join(station_path, fname)
            # only surface-layer files (depth range 0.00-0.05 m in the name)
            if '0.00' not in file or '0.05' not in file:
                continue
            print(station)
            # header line: ... lat at field 3, lon at field 4
            head = pd.read_csv(file, sep=r'\s+', header=None, nrows=1)
            lat = head.iloc[0, 3]
            lon = head.iloc[0, 4]
            # body: col 0 = 'YYYY/MM/DD', col 2 = soil-moisture value
            body = pd.read_csv(file, sep=' ', header=None, skiprows=1)
            body_mean = body.groupby(0)[2].mean().reset_index()
            for _, rec in body_mean.iterrows():
                parts = rec[0].split('/')
                date = parts[0] + parts[1] + parts[2]  # 'YYYYMMDD'
                records.append({'station': station, 'date': date,
                                'lon': lon, 'lat': lat, 'mean': rec[2]})
    result_df = pd.DataFrame(records, columns=['station', 'date', 'lon', 'lat', 'mean'])
    result_df.to_csv(out_csv, index=False)


# Convert a raster pixel (col, row) to geographic coordinates
def pixel2coord(geo_transform, col, row):
    """Return the (lon, lat) of pixel (col, row) under a GDAL geotransform.

    geo_transform is the usual 6-tuple (x origin, pixel width, x rotation,
    y origin, y rotation, pixel height); the rotation terms are ignored,
    i.e. a north-up raster is assumed.
    """
    return (geo_transform[0] + geo_transform[1] * col,
            geo_transform[3] + geo_transform[5] * row)


# Convert geographic coordinates to a raster pixel (col, row)
def coord2pixel(geo_transform, lon, lat):
    """Return the (col, row) pixel index containing (lon, lat).

    Inverse of pixel2coord for a north-up raster; indices are truncated
    toward zero, so points left of / above the origin yield negative indices.
    """
    x_origin, pixel_width = geo_transform[0], geo_transform[1]
    y_origin, pixel_height = geo_transform[3], geo_transform[5]
    column_index = int((lon - x_origin) / pixel_width)
    row_index = int((lat - y_origin) / pixel_height)
    return column_index, row_index


def getSRSPair(dataset):
    """Return the projected SRS and the matching geographic SRS of a dataset.

    :param dataset: an opened GDAL dataset
    :return: (projected SpatialReference, geographic SpatialReference)
    """
    projected = osr.SpatialReference()
    projected.ImportFromWkt(dataset.GetProjection())
    # the geographic CRS is derived from the projection's own datum/ellipsoid
    geographic = projected.CloneGeogCS()
    return projected, geographic


def lonlat2geo(dataset, lon, lat):
    """Project a geographic (lon, lat) into the dataset's projected CRS.

    :param dataset: an opened GDAL dataset (defines the target projection)
    :param lon: longitude in the dataset's geographic CRS
    :param lat: latitude in the dataset's geographic CRS
    :return: (x, y) projected coordinates
    """
    projected, geographic = getSRSPair(dataset)
    transform = osr.CoordinateTransformation(geographic, projected)
    # NOTE(review): under GDAL >= 3 the default axis order for geographic CRSs
    # is (lat, lon); confirm the argument order matches the GDAL version in use.
    point = transform.TransformPoint(lon, lat)
    return point[:2]


# Sample downscaled rasters at station locations and write into the CSV
def read_tif_to_excel(tif_dir, excel_path, col_name):
    """Sample raster values at each row's (lon, lat) and store them in-place.

    Reads the CSV at *excel_path*, looks up the raster value under each row's
    coordinates and writes it into column *col_name*, then overwrites the CSV.

    :param tif_dir: either a directory of per-date GeoTIFFs (file name is the
        'date' column, prefixed with 'Lai_' when col_name == 'lai') or the path
        of a single GeoTIFF applied to every row
    :param excel_path: CSV with at least 'lon' and 'lat' (and 'date' when
        tif_dir is a directory) columns; updated in place
    :param col_name: destination column for the sampled values
    """
    ctp_df = pd.read_csv(excel_path)
    if os.path.isdir(tif_dir):
        for index, rec in tqdm(ctp_df.iterrows()):
            date = rec["date"]
            lon = rec["lon"]
            lat = rec["lat"]
            if col_name == 'lai':
                tif_path = os.path.join(tif_dir, 'Lai_' + str(date) + ".tif")
            else:
                tif_path = os.path.join(tif_dir, str(date) + ".tif")
            if not os.path.exists(tif_path):
                continue
            tif_dataset = gdal.Open(tif_path)
            tif_data = tif_dataset.ReadAsArray()
            tif_geo = tif_dataset.GetGeoTransform()
            col, row = coord2pixel(tif_geo, lon, lat)
            # BUG FIX: negative indices silently wrapped to the opposite raster
            # edge and stored wrong values; treat them as out-of-raster instead.
            if row < 0 or col < 0:
                continue
            try:
                ctp_df.loc[index, col_name] = tif_data[row, col]
            except IndexError:  # point falls outside the raster extent
                continue
    else:
        # single raster shared by all rows: open it once outside the loop
        tif_dataset = gdal.Open(tif_dir)
        tif_data = tif_dataset.ReadAsArray()
        tif_geo = tif_dataset.GetGeoTransform()
        for index, rec in tqdm(ctp_df.iterrows()):
            lon = rec["lon"]
            lat = rec["lat"]
            col, row = coord2pixel(tif_geo, lon, lat)
            if row < 0 or col < 0:
                continue
            try:
                ctp_df.loc[index, col_name] = tif_data[row, col]
            except IndexError:
                continue
    print(ctp_df)
    ctp_df.to_csv(excel_path, index=False)
    print("写入完成")


# Correlation analysis between in-situ measurements, CCI and downscaled products
def _fit_line(x, y):
    """Degree-1 least-squares fit of y on x; returns (coefficients, fitted y)."""
    coeff = np.polyfit(x, y, 1)
    return coeff, coeff[0] * x + coeff[1]


def _scatter_panel(x, y, xlabel, ylabel, coeff, fit, imin, imax, r):
    """Draw one scatter panel into the current axes: points, 1:1 reference
    line, red fitted line between the x-extreme points, and equation/R text."""
    plt.title(xlabel + "-" + ylabel)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.scatter(x, y, s=5, c='b')
    plt.plot([0, 1], [0, 1], 'k--')  # 1:1 reference line
    plt.plot([x.to_numpy()[imin], x.to_numpy()[imax]],
             [fit.to_numpy()[imin], fit.to_numpy()[imax]], c='r')
    plt.text(0.01, 1.0, 'y = {:.4f} x + {:.4f}'.format(coeff[0], coeff[1]))
    plt.text(0.01, 0.95, 'R = {:.4f}'.format(r))


def corr_analys(excel_path):
    """Plot pairwise validation scatter figures from the station CSV.

    Figure 1: measurement vs each downscaled vegetation-index product.
    Figure 2: CCI ('mcca_clip') vs each downscaled product.
    Figure 3: measurement vs CCI.
    Each panel shows the Pearson R and a degree-1 fit.

    :param excel_path: CSV containing 'mean', 'mcca_clip' and the
        '*_downscal' columns
    """
    ctp_df = pd.read_csv(excel_path)
    # BUG FIX: the original cast the unused 'smap_clip' column to float but
    # left 'mcca_clip' (which IS used below) unconverted.
    used_cols = ['mean', 'mcca_clip', 'evi_downscal', 'ndvi_downscal',
                 'msavi_downscal', 'nirv_downscal']
    for name in used_cols:
        ctp_df[name] = ctp_df[name].astype(float)
    # drop rows with any missing value
    sm_df = ctp_df[used_cols].dropna(axis=0, how='any')
    print(sm_df)
    # Pearson correlation matrix over all used columns
    corr = sm_df.corr(method='pearson')

    measure_cci_coeff, measure_cci_fit = _fit_line(sm_df['mean'], sm_df['mcca_clip'])
    print(measure_cci_coeff)

    # positional indices of the x-range extremes, used to draw the fit lines
    measure_argmin = np.argmin(sm_df['mean'])
    measure_argmax = np.argmax(sm_df['mean'])
    cci_argmin = np.argmin(sm_df['mcca_clip'])
    cci_argmax = np.argmax(sm_df['mcca_clip'])

    vi_cols = [('evi_downscal', 'EVI'), ('ndvi_downscal', 'NDVI'),
               ('msavi_downscal', 'MSAVI'), ('nirv_downscal', 'NIRV')]

    # Figure 1: measurement vs each downscaled vegetation-index product
    plt.figure(num=1, figsize=(10, 10))
    for pos, (col, label) in zip((221, 222, 223, 224), vi_cols):
        plt.subplot(pos)
        coeff, fit = _fit_line(sm_df['mean'], sm_df[col])
        _scatter_panel(sm_df['mean'], sm_df[col], 'Measure', label,
                       coeff, fit, measure_argmin, measure_argmax,
                       corr['mean'][col])

    # Figure 2: CCI vs each downscaled vegetation-index product
    plt.figure(num=2, figsize=(10, 10))
    for pos, (col, label) in zip((221, 222, 223, 224), vi_cols):
        plt.subplot(pos)
        coeff, fit = _fit_line(sm_df['mcca_clip'], sm_df[col])
        _scatter_panel(sm_df['mcca_clip'], sm_df[col], 'CCI', label,
                       coeff, fit, cci_argmin, cci_argmax,
                       corr['mcca_clip'][col])

    # Figure 3: measurement vs CCI
    plt.figure(num=3)
    _scatter_panel(sm_df['mean'], sm_df['mcca_clip'], 'Measure', 'CCI',
                   measure_cci_coeff, measure_cci_fit,
                   measure_argmin, measure_argmax, corr['mean']['mcca_clip'])
    plt.show()


def corr_measure_smap(excel_path, product1, product2):
    """Validation scatter plot of *product2* against *product1*.

    Reads the station CSV, keeps year 2015, averages duplicate stations per
    (date, cci) cell, then plots product2 vs product1 with the fit equation,
    Pearson R, RMSE, RMSD and bias annotated. Points are coloured by their
    normalised distance from the point-cloud centroid.

    :param excel_path: CSV with 'year', 'date', 'cci' and the product columns
    :param product1: x-axis column name (reference, e.g. in-situ mean)
    :param product2: y-axis column name (product under evaluation)
    """
    ctp_df = pd.read_csv(excel_path)
    # keep a single year (station/month filters from earlier experiments removed)
    ctp_df = ctp_df[ctp_df['year'] == 2015]
    # average rows sharing the same (date, cci) cell; numeric_only keeps the
    # pre-pandas-2 behaviour of silently dropping non-numeric columns
    ctp_df = ctp_df.groupby(['date', 'cci']).mean(numeric_only=True).reset_index()
    ctp_df[product1] = ctp_df[product1].astype(float)
    ctp_df[product2] = ctp_df[product2].astype(float)
    # drop rows with any missing value
    sm_df = ctp_df[[product1, product2]].dropna(axis=0, how='any')
    corr = sm_df.corr(method='pearson')
    rmse = np.sqrt(np.mean((sm_df[product2] - sm_df[product1]) ** 2))
    # NOTE(review): this "RMSD" is the standard deviation of product2 alone —
    # confirm the centred-difference RMSD was not intended.
    rmsd = np.sqrt(np.mean((sm_df[product2] - sm_df[product2].mean()) ** 2))
    bias = (sm_df[product2] - sm_df[product1]).mean()
    # degree-1 fit of product2 on product1
    measure_cci_coeff = np.polyfit(sm_df[product1], sm_df[product2], 1)
    # colour value: normalised distance of each point from the cloud centroid
    d = np.sqrt((sm_df[product1] - sm_df[product1].mean()) ** 2
                + (sm_df[product2] - sm_df[product2].mean()) ** 2)
    d = d / d.max()

    font = {'size': 12, 'family': 'Times New Roman', 'weight': 'bold'}
    plt.figure(num=3, figsize=(10, 10))
    plt.title(product1 + "-" + product2, fontdict=font)
    plt.xlabel(product1, fontdict=font)
    plt.ylabel(product2, fontdict=font)
    plt.scatter(sm_df[product1], sm_df[product2], s=3, c=d, cmap='viridis_r')
    plt.plot([0, 1], [0, 1], 'k--')       # 1:1 reference line
    plt.plot([0, 1], [0.05, 1.15], 'k:')  # upper error envelope
    plt.plot([0.05, 1], [0, 0.85], 'k:')  # lower error envelope
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(measure_cci_coeff[0], measure_cci_coeff[1]), fontdict=font)
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr[product1][product2]), fontdict=font)
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse), fontdict=font)
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd), fontdict=font)
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias), fontdict=font)
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.grid()
    plt.show()


# Analyse the downscaled results stratified by land cover and elevation
def _panel_metrics(df, p1, p2):
    """Validation metrics of p2 against p1 for one subset.

    Returns (correlation matrix, RMSE, RMSD, bias, degree-1 fit coefficients,
    normalised distance of each point from the point-cloud centroid).
    """
    corr = df.corr(method='pearson')
    rmse = np.sqrt(np.mean((df[p2] - df[p1]) ** 2))
    # NOTE(review): "RMSD" here is the standard deviation of p2 alone —
    # confirm the centred-difference RMSD was not intended.
    rmsd = np.sqrt(np.mean((df[p2] - df[p2].mean()) ** 2))
    bias = (df[p2] - df[p1]).mean()
    coeff = np.polyfit(df[p1], df[p2], 1)
    d = np.sqrt((df[p1] - df[p1].mean()) ** 2 + (df[p2] - df[p2].mean()) ** 2)
    return corr, rmse, rmsd, bias, coeff, d / d.max()


def _metrics_panel(pos, df, p1, p2, label, metrics):
    """Draw one subplot: scatter coloured by centroid distance, 1:1 line,
    dotted error envelopes, and the metric annotations."""
    corr, rmse, rmsd, bias, coeff, d = metrics
    font = {'size': 12, 'family': 'Times New Roman', 'weight': 'bold'}
    plt.subplot(pos)
    plt.xlabel(p1, fontdict=font)
    plt.ylabel(p2 + ' ' + label, fontdict=font)
    plt.scatter(df[p1], df[p2], s=3, c=d, cmap='viridis_r')
    plt.plot([0, 1], [0, 1], 'k--')       # 1:1 reference line
    plt.plot([0, 1], [0.05, 1.15], 'k:')  # upper error envelope
    plt.plot([0.05, 1], [0, 0.85], 'k:')  # lower error envelope
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(coeff[0], coeff[1]), fontdict=font)
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr[p1][p2]), fontdict=font)
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse), fontdict=font)
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd), fontdict=font)
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias), fontdict=font)
    plt.xlim(0, 1)
    plt.ylim(0, 1)


def corr_measure_smap_dem_land(excel_path, product1, product2):
    """Validation of *product2* vs *product1* stratified into four subsets:
    grassland, cropland, and two elevation bands around 202.94 m.

    One 2x2 figure, one panel per subset, each annotated with the fit
    equation, Pearson R, RMSE, RMSD and bias.

    :param excel_path: CSV with 'year', 'date', 'cci', 'landcover_2015',
        'dem' and the product columns
    :param product1: x-axis column name (reference)
    :param product2: y-axis column name (product under evaluation)
    """
    ctp_df = pd.read_csv(excel_path)
    ctp_df = ctp_df[ctp_df['year'] == 2015]
    # numeric_only keeps the pre-pandas-2 behaviour of dropping non-numeric
    # columns (e.g. 'station') instead of raising
    ctp_df = ctp_df.groupby(['date', 'cci']).mean(numeric_only=True).reset_index()
    sm_df = ctp_df[['mean', 'evi_downscal', 'msavi_downscal', 'ndvi_downscal',
                    'nirv_downscal', 'fpar_downscal', 'lai_downscal', 'cci',
                    'landcover_2015', 'dem']]
    sm_df = sm_df.dropna(axis=0, how='any')
    # four strata: two land-cover classes (10 = grassland, 12 = cropland —
    # presumably ESA CCI codes, TODO confirm) and two elevation half-widths
    # (100 m / 300 m) around 202.94 m
    subsets = [
        (221, 'Grassland', sm_df[sm_df['landcover_2015'] == 10]),
        (222, 'Cropland', sm_df[sm_df['landcover_2015'] == 12]),
        (223, 'Dem100', sm_df[abs(sm_df['dem'] - 202.94) < 100]),
        (224, 'Dem300', sm_df[abs(sm_df['dem'] - 202.94) < 300]),
    ]
    font = {'size': 12, 'family': 'Times New Roman', 'weight': 'bold'}
    plt.figure(num=1, figsize=(10, 10))
    plt.title(product1 + "-" + product2, fontdict=font)
    for pos, label, sub in subsets:
        _metrics_panel(pos, sub, product1, product2, label,
                       _panel_metrics(sub, product1, product2))
    plt.show()


# 分析高程和土地覆盖下的降尺度结果区分站点颜色
def corr_measure_smap_dem_land_with_station(excel_path, product1, product2):
    ctp_df = pd.read_csv(excel_path)
    ctp_df = ctp_df[ctp_df['year'] == 2015]
    # ctp_df = ctp_df.groupby(['date', 'cci']).agg({'station': 'first', 'mean': 'mean', 'evi_downscal': 'mean', 'msavi_downscal': 'mean', 'ndvi_downscal': 'mean', 'nirv_downscal': 'mean', 'fpar_downscal': 'mean', 'lai_downscal': 'mean', 'landcover_2015': 'first', 'dem': 'first'}).reset_index()
    # print(ctp_df.head(20))
    # 计算皮尔逊相关系数
    # sm_df = ctp_df[['mean', 'evi_avdi', 'msavi_avdi', 'ndvi_avdi', 'nirv_avdi', 'fpar_avdi', 'lai_avdi', 'cci']]
    sm_df = ctp_df[['station', 'mean', 'evi_downscal', 'msavi_downscal', 'ndvi_downscal', 'nirv_downscal', 'fpar_downscal', 'lai_downscal', 'cci', 'landcover_2015', 'dem']]
    sm_df = sm_df.dropna(axis=0, how='any')
    # 筛选条件
    sm_grassland_df = sm_df[sm_df['landcover_2015'] == 10]
    sm_cropland_df = sm_df[sm_df['landcover_2015'] == 12]
    sm_dem100_df = sm_df[abs(sm_df['dem'] - 202.94) < 100]
    sm_dem300_df = sm_df[abs(sm_df['dem'] - 202.94) < 300]
    # 去掉存在nan的行
    # 计算相关系数
    # 草地
    corr_grassland = sm_grassland_df.corr(method='pearson')
    rmse_grassland = np.sqrt(np.mean((sm_grassland_df[product2] - sm_grassland_df[product1]) ** 2))
    rmsd_grassland = np.sqrt(np.mean((sm_grassland_df[product2] - sm_grassland_df[product2].mean()) ** 2))
    # 计算偏差
    bias_grassland = (sm_grassland_df[product2] - sm_grassland_df[product1]).mean()
    # 拟合系数
    measure_cci_coeff_grassland = np.polyfit(sm_grassland_df[product1], sm_grassland_df[product2], 1)

    # 耕地
    corr_cropland = sm_cropland_df.corr(method='pearson')
    rmse_cropland = np.sqrt(np.mean((sm_cropland_df[product2] - sm_cropland_df[product1]) ** 2))
    rmsd_cropland = np.sqrt(np.mean((sm_cropland_df[product2] - sm_cropland_df[product2].mean()) ** 2))
    # 计算偏差
    bias_cropland = (sm_cropland_df[product2] - sm_cropland_df[product1]).mean()
    # 拟合系数
    measure_cci_coeff_cropland = np.polyfit(sm_cropland_df[product1], sm_cropland_df[product2], 1)

    # 高程100m
    corr_dem100 = sm_dem100_df.corr(method='pearson')
    rmse_dem100 = np.sqrt(np.mean((sm_dem100_df[product2] - sm_dem100_df[product1]) ** 2))
    rmsd_dem100 = np.sqrt(np.mean((sm_dem100_df[product2] - sm_dem100_df[product2].mean()) ** 2))
    # 计算偏差
    bias_dem100 = (sm_dem100_df[product2] - sm_dem100_df[product1]).mean()
    # 拟合系数
    measure_cci_coeff_dem100 = np.polyfit(sm_dem100_df[product1], sm_dem100_df[product2], 1)

    # 高程300m
    corr_dem300 = sm_dem300_df.corr(method='pearson')
    rmse_dem300 = np.sqrt(np.mean((sm_dem300_df[product2] - sm_dem300_df[product1]) ** 2))
    rmsd_dem300 = np.sqrt(np.mean((sm_dem300_df[product2] - sm_dem300_df[product2].mean()) ** 2))
    # 计算偏差
    bias_dem300 = (sm_dem300_df[product2] - sm_dem300_df[product1]).mean()
    # 拟合系数
    measure_cci_coeff_dem300 = np.polyfit(sm_dem300_df[product1], sm_dem300_df[product2], 1)

    # selfCmap = LinearSegmentedColormap.from_list('selfCmap', ["yellow", "gold", "greenyellow", "green", "deepskyblue", "blue", "purple"], N=256)
    plt.figure(num=1, figsize=(10, 10))
    plt.title(product1 + "-" + product2, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.subplot(221)
    plt.xlabel(product1, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.ylabel(product2 + ' Grassland', fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    # plt.scatter(sm_grassland_df[product1], sm_grassland_df[product2], s=3, c=d_grassland, cmap='viridis_r')
    for site, group in sm_grassland_df.groupby('station'):
        plt.scatter(group[product1], group[product2], s=3, label=site)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([0, 1], [0.05, 1.15], 'k:')
    plt.plot([0.05, 1], [0, 0.85], 'k:')
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(measure_cci_coeff_grassland[0], measure_cci_coeff_grassland[1]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr_grassland[product1][product2]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse_grassland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd_grassland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias_grassland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    plt.subplot(222)
    plt.xlabel(product1, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.ylabel(product2 + ' Cropland', fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    # plt.scatter(sm_cropland_df[product1], sm_cropland_df[product2], s=3, c=d_cropland, cmap='viridis_r')
    for site, group in sm_cropland_df.groupby('station'):
        plt.scatter(group[product1], group[product2], s=3, label=site)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([0, 1], [0.05, 1.15], 'k:')
    plt.plot([0.05, 1], [0, 0.85], 'k:')
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(measure_cci_coeff_cropland[0], measure_cci_coeff_cropland[1]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr_cropland[product1][product2]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse_cropland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd_cropland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias_cropland), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    plt.subplot(223)
    plt.xlabel(product1, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.ylabel(product2 + ' Dem100', fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    # plt.scatter(sm_dem100_df[product1], sm_dem100_df[product2], s=3, c=d_dem100, cmap='viridis_r')
    for site, group in sm_dem100_df.groupby('station'):
        plt.scatter(group[product1], group[product2], s=3, label=site)
    plt.legend(loc='lower right', ncol=2, fontsize=10)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([0, 1], [0.05, 1.15], 'k:')
    plt.plot([0.05, 1], [0, 0.85], 'k:')
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(measure_cci_coeff_dem100[0], measure_cci_coeff_dem100[1]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr_dem100[product1][product2]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse_dem100), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd_dem100), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias_dem100), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    plt.subplot(224)
    plt.xlabel(product1, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.ylabel(product2 + ' Dem300', fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    # plt.scatter(sm_dem300_df[product1], sm_dem300_df[product2], s=3, c=d_dem300, cmap='viridis_r')
    for site, group in sm_dem300_df.groupby('station'):
        plt.scatter(group[product1], group[product2], s=3, label=site)
    plt.legend(loc='lower right', ncol=2, fontsize=10)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([0, 1], [0.05, 1.15], 'k:')
    plt.plot([0.05, 1], [0, 0.85], 'k:')
    plt.text(0.01, 0.95, 'y = {:.4f} x + {:.4f}'.format(measure_cci_coeff_dem300[0], measure_cci_coeff_dem300[1]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.91, 'R = {:.4f}'.format(corr_dem300[product1][product2]), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.87, 'RMSE = {:.4f}'.format(rmse_dem300), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.83, 'RMSD = {:.4f}'.format(rmsd_dem300), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.text(0.01, 0.79, 'Bias = {:.4f}'.format(bias_dem300), fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    plt.show()


# Bar charts of downscaling accuracy (Bias / unRMSE / RMSD / R), grouped by station or by season
def corr_downscal_with_station(excel_path, product1, product2, bar_type):
    """Plot per-station or per-season accuracy bars for CCI SM vs. downscaled SM.

    Parameters
    ----------
    excel_path : str
        CSV with at least 'year', 'station', 'month', 'cci', the in-situ
        column (``product1``) and the downscaled column (``product2``).
    product1 : str
        Column holding the reference (in-situ) soil moisture, e.g. 'mean'.
    product2 : str
        Column holding the downscaled soil moisture, e.g. 'lai_downscal'.
    bar_type : str
        'station' -> one bar group per station plus a leading 'Mean' group
        with std error bars; 'season' -> one group per season. Any other
        value returns ``None`` without plotting.
    """

    def _metrics(sm_df, ref_col, test_col):
        """Return (R, RMSE, RMSD, bias) of ``test_col`` against ``ref_col``."""
        diff = sm_df[test_col] - sm_df[ref_col]
        # Series.corr avoids DataFrame.corr on frames that contain the
        # non-numeric 'station' column (raises in pandas >= 2.0).
        r = sm_df[ref_col].corr(sm_df[test_col])
        rmse = np.sqrt(np.mean(diff ** 2))
        rmsd = np.sqrt(np.mean((sm_df[test_col] - sm_df[test_col].mean()) ** 2))
        return r, rmse, rmsd, diff.mean()

    ctp_df = pd.read_csv(excel_path)
    ctp_df = ctp_df[ctp_df['year'] == 2015]

    # Accumulate rows in plain lists: DataFrame.append was removed in pandas >= 2.0.
    R_rows, unrmse_rows, RMSD_rows, BIAS_rows = [], [], [], []

    if bar_type == 'station':
        bar_width = 0.3  # bar width
        stations = ['Y1', 'Y2', 'Y4', 'Y5', 'Y6', 'Y7', 'Y8', 'Y9', 'Y10', 'Y11', 'Y12', 'Y13', 'K7', 'K10', 'K11', 'K12', 'K13', 'K14']
        for station in stations:
            station_df = ctp_df[ctp_df['station'] == station]
            sm_df = station_df[['station', 'mean', 'lai_downscal', 'cci', 'landcover_2015', 'dem']].dropna(axis=0, how='any')
            r_cci, unrmse_cci, rmsd_cci, bias_cci = _metrics(sm_df, product1, 'cci')
            r_sm, unrmse_sm, rmsd_sm, bias_sm = _metrics(sm_df, product1, product2)
            R_rows.append({'station': station, 'CCI SM': r_cci, 'SM': r_sm})
            unrmse_rows.append({'station': station, 'CCI SM': unrmse_cci, 'SM': unrmse_sm})
            RMSD_rows.append({'station': station, 'CCI SM': rmsd_cci, 'SM': rmsd_sm})
            BIAS_rows.append({'station': station, 'CCI SM': bias_cci, 'SM': bias_sm})

        R_df = pd.DataFrame(R_rows)
        unrmse_df = pd.DataFrame(unrmse_rows)
        RMSD_df = pd.DataFrame(RMSD_rows)
        BIAS_df = pd.DataFrame(BIAS_rows)

        # Station-wise mean/std feed the error bars drawn on the 'Mean' group.
        R_df_cci_mean, R_df_sm_mean = R_df['CCI SM'].mean(), R_df['SM'].mean()
        unrmse_df_cci_mean, unrmse_df_sm_mean = unrmse_df['CCI SM'].mean(), unrmse_df['SM'].mean()
        RMSD_df_cci_mean, RMSD_df_sm_mean = RMSD_df['CCI SM'].mean(), RMSD_df['SM'].mean()
        BIAS_df_cci_mean, BIAS_df_sm_mean = BIAS_df['CCI SM'].mean(), BIAS_df['SM'].mean()

        R_df_cci_std, R_df_sm_std = R_df['CCI SM'].std(), R_df['SM'].std()
        unrmse_df_cci_std, unrmse_df_sm_std = unrmse_df['CCI SM'].std(), unrmse_df['SM'].std()
        RMSD_df_cci_std, RMSD_df_sm_std = RMSD_df['CCI SM'].std(), RMSD_df['SM'].std()
        BIAS_df_cci_std, BIAS_df_sm_std = BIAS_df['CCI SM'].std(), BIAS_df['SM'].std()

        def _with_mean_row(df):
            """Prepend a 'Mean' row holding the per-station averages."""
            mean_row = pd.DataFrame([{'station': 'Mean', 'CCI SM': df['CCI SM'].mean(), 'SM': df['SM'].mean()}])
            return pd.concat([mean_row, df], ignore_index=True)

        R_df = _with_mean_row(R_df)
        unrmse_df = _with_mean_row(unrmse_df)
        RMSD_df = _with_mean_row(RMSD_df)
        BIAS_df = _with_mean_row(BIAS_df)

        index_cci = np.arange(len(stations) + 1)
        index_sm = index_cci + bar_width
        tick_labels = ['Mean'] + stations
    elif bar_type == 'season':
        bar_width = 0.05  # bar width
        seasons = ['All season', 'Spring', 'Summer', 'Autumn', 'Winter']
        # Southern-hemisphere season split (OZNET is in Australia).
        season_months = {'Spring': (9, 10, 11), 'Summer': (12, 1, 2), 'Autumn': (3, 4, 5), 'Winter': (6, 7, 8)}
        for season in seasons:
            if season in season_months:
                season_df = ctp_df[ctp_df['month'].isin(season_months[season])]
            else:
                season_df = ctp_df  # 'All season'
            sm_df = season_df[['station', 'mean', 'lai_downscal', 'cci']].dropna(axis=0, how='any')
            r_cci, unrmse_cci, rmsd_cci, bias_cci = _metrics(sm_df, product1, 'cci')
            r_sm, unrmse_sm, rmsd_sm, bias_sm = _metrics(sm_df, product1, product2)
            R_rows.append({'season': season, 'CCI SM': r_cci, 'SM': r_sm})
            unrmse_rows.append({'season': season, 'CCI SM': unrmse_cci, 'SM': unrmse_sm})
            RMSD_rows.append({'season': season, 'CCI SM': rmsd_cci, 'SM': rmsd_sm})
            BIAS_rows.append({'season': season, 'CCI SM': bias_cci, 'SM': bias_sm})

        R_df = pd.DataFrame(R_rows)
        unrmse_df = pd.DataFrame(unrmse_rows)
        RMSD_df = pd.DataFrame(RMSD_rows)
        BIAS_df = pd.DataFrame(BIAS_rows)

        index_cci = np.arange(len(seasons))
        index_sm = index_cci + bar_width
        tick_labels = seasons
    else:
        return None

    # Shared x-limit. Fixes a NameError: the original used len(stations)
    # here even when bar_type == 'season', where 'stations' is undefined.
    x_max = len(index_cci) - 1 + 0.8

    plt.figure(num=1, figsize=(10, 40))
    plt.title(product1 + "-" + product2, fontdict={'size': 12, 'family': 'Times New Roman', 'weight': 'bold'})

    plt.subplot(4, 1, 1)
    plt.bar(index_cci, BIAS_df['CCI SM'], width=bar_width, color='orange', label='CCI SM')
    plt.bar(index_sm, BIAS_df['SM'], width=bar_width, color='blue', label='MODIS downscal SM')
    plt.xlim(-0.5, x_max)
    plt.ylim(-0.02, 0.1)
    plt.ylabel('BIAS\n(m³ / m³)')
    plt.xticks(index_cci, [])
    plt.yticks([-0.02, 0, 0.02, 0.04, 0.06, 0.08, 0.1])
    if bar_type == 'station':
        plt.errorbar(x=index_cci[0], y=BIAS_df_cci_mean, yerr=BIAS_df_cci_std, fmt='none', ecolor='black', capsize=5)
        plt.errorbar(x=index_sm[0], y=BIAS_df_sm_mean, yerr=BIAS_df_sm_std, fmt='none', ecolor='black', capsize=5)
    plt.grid(linestyle='--', alpha=0.8)

    plt.subplot(4, 1, 2)
    plt.bar(index_cci, unrmse_df['CCI SM'], width=bar_width, color='orange', label='CCI SM')
    plt.bar(index_sm, unrmse_df['SM'], width=bar_width, color='blue', label='MODIS downscal SM')
    plt.xlim(-0.5, x_max)
    plt.ylim(0, 0.1)
    plt.ylabel('unRMSE\n(m³ / m³)')
    plt.xticks(index_cci, [])
    plt.yticks([0.05, 0.1])
    if bar_type == 'station':
        plt.errorbar(x=index_cci[0], y=unrmse_df_cci_mean, yerr=unrmse_df_cci_std, fmt='none', ecolor='black', capsize=5)
        plt.errorbar(x=index_sm[0], y=unrmse_df_sm_mean, yerr=unrmse_df_sm_std, fmt='none', ecolor='black', capsize=5)
    plt.grid(linestyle='--', alpha=0.8)

    plt.subplot(4, 1, 3)
    plt.bar(index_cci, RMSD_df['CCI SM'], width=bar_width, color='orange', label='CCI SM')
    plt.bar(index_sm, RMSD_df['SM'], width=bar_width, color='blue', label='MODIS downscal SM')
    plt.ylim(0, 0.1)
    plt.ylabel('RMSD\n(m³ / m³)')
    plt.xticks(index_cci, [])
    plt.yticks([0.05, 0.1])
    if bar_type == 'station':
        plt.errorbar(x=index_cci[0], y=RMSD_df_cci_mean, yerr=RMSD_df_cci_std, fmt='none', ecolor='black', capsize=5)
        plt.errorbar(x=index_sm[0], y=RMSD_df_sm_mean, yerr=RMSD_df_sm_std, fmt='none', ecolor='black', capsize=5)
    plt.grid(linestyle='--', alpha=0.8)

    plt.subplot(4, 1, 4)
    plt.bar(index_cci, R_df['CCI SM'], width=bar_width, color='orange', label='CCI SM')
    plt.bar(index_sm, R_df['SM'], width=bar_width, color='blue', label='MODIS downscal SM')
    plt.ylim(-0.2, 0.8)
    plt.ylabel('R')
    plt.xticks(index_cci, [])
    plt.yticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8])
    if bar_type == 'station':
        plt.errorbar(x=index_cci[0], y=R_df_cci_mean, yerr=R_df_cci_std, fmt='none', ecolor='black', capsize=5)
        plt.errorbar(x=index_sm[0], y=R_df_sm_mean, yerr=R_df_sm_std, fmt='none', ecolor='black', capsize=5)
    plt.grid(linestyle='--', alpha=0.8)

    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, ncol=5)
    plt.subplots_adjust(hspace=0.5)
    plt.xticks(index_cci, tick_labels)
    plt.show()

def R_boxplot(excel_path, plot_type):
    """Boxplot of correlations (R) between in-situ SM and each AVDI product.

    One R sample is computed per time slice (month, season or day); the
    slices are then summarised as one box per product column.

    Parameters
    ----------
    excel_path : str
        CSV containing 'year', 'month', 'day', the *_avdi columns, 'mean'
        (in-situ SM), 'dem' and 'landcover_2015'.
    plot_type : str
        'month', 'season' or 'day'. Any other value prints a warning and
        returns without plotting.
    """
    data_df = pd.read_csv(excel_path)
    data_df = data_df[data_df['year'] == 2015]
    print(data_df)
    sm_df = data_df[['month', 'day', 'evi_avdi', 'msavi_avdi', 'ndvi_avdi', 'nirv_avdi', 'fpar_avdi', 'lai_avdi', 'mean', 'dem', 'landcover_2015']]
    corr_df = pd.DataFrame(columns=['evi_avdi', 'msavi_avdi', 'ndvi_avdi', 'nirv_avdi', 'fpar_avdi', 'lai_avdi', 'dem', 'landcover_2015'])
    # Drop rows with any missing value before correlating
    sm_df = sm_df.dropna(axis=0, how='any')
    if plot_type == 'month':
        for i in range(12):
            sm_month_df = sm_df[sm_df['month'] == i + 1]
            print(sm_month_df.corr())
            corr_df.loc[i] = sm_month_df.corr()['mean']
        plt.title("Correlation of the Measure computed by each Month")
    elif plot_type == 'season':
        # Southern-hemisphere seasons: Dec-Feb, Mar-May, Jun-Aug, Sep-Nov
        for i, months in enumerate([(12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)]):
            corr_df.loc[i] = sm_df[sm_df['month'].isin(months)].corr()['mean']
        plt.title("Correlation of the Measure computed by each Season")
    elif plot_type == 'day':
        index = 0
        for month in range(1, 13):
            for day in range(1, 32):
                sm_day_df = sm_df[(sm_df['month'] == month) & (sm_df['day'] == day)]
                if sm_day_df.shape[0] > 0:
                    corr_df.loc[index] = sm_day_df.corr()['mean']
                    index += 1
        plt.title("Correlation of the Measure computed by each Day")
    else:
        # Fix: the original fell through here and still plotted an empty frame.
        print("Wrong plot type!")
        return None
    print(corr_df)
    sns.boxplot(corr_df, width=0.5,
                boxprops=dict(edgecolor="blue", facecolor="white"),
                whiskerprops=dict(color="black"),
                medianprops=dict(color="red"),
                capprops=dict(color="black"))
    plt.ylabel("R")
    plt.ylim(-1, 1)
    plt.show()


# R boxplot stratified by elevation band and land-cover type
def R_boxplot_with_dem_and_land(excel_path):
    """Boxplot of monthly R between in-situ SM ('mean') and LAI-AVDI SM,
    computed separately for grassland, cropland and two elevation bands."""
    frame = pd.read_csv(excel_path)
    frame = frame[frame['year'] == 2015]
    keep_cols = ['month', 'day', 'evi_avdi', 'msavi_avdi', 'ndvi_avdi', 'nirv_avdi', 'fpar_avdi', 'lai_avdi', 'mean', 'dem', 'landcover_2015']
    frame = frame[keep_cols].dropna(axis=0, how='any')

    # Four record subsets: two land-cover classes, two elevation bands
    # (bands are centred on 202.94 m).
    subsets = {
        'lai_grassland': frame[frame['landcover_2015'] == 10],
        'lai_cropland': frame[frame['landcover_2015'] == 12],
        'lai_dem100': frame[abs(frame['dem'] - 202.94) < 100],
        'lai_dem300': frame[abs(frame['dem'] - 202.94) < 300],
    }

    print(subsets['lai_dem100'])
    print(subsets['lai_grassland'])

    corr_df = pd.DataFrame(columns=list(subsets))
    # One R value per calendar month and subset
    for month_idx in range(12):
        for label, subset in subsets.items():
            monthly = subset[subset['month'] == month_idx + 1]
            corr_df.loc[month_idx, label] = monthly.corr()['mean']['lai_avdi']
    print(corr_df)
    plt.title("Correlation of the Measure computed by each Month")
    sns.boxplot(corr_df, width=0.5,
                boxprops=dict(edgecolor="blue", facecolor="white"),
                whiskerprops=dict(color="black"),
                medianprops=dict(color="red"),
                capprops=dict(color="black"))
    plt.ylabel("R")
    plt.ylim(-1, 1)
    plt.show()


def R_boxplot_by_station(excel_path, station):
    """Boxplot of monthly correlations between in-situ SM ('mean') and each
    AVDI product for a single station."""
    records = pd.read_csv(excel_path)
    records = records[records['station'] == station]

    product_cols = ['evi_avdi', 'msavi_avdi', 'ndvi_avdi', 'nirv_avdi', 'fpar_avdi', 'lai_avdi']
    sm_df = records[['month', 'day'] + product_cols + ['mean']]
    corr_df = pd.DataFrame(columns=product_cols)

    # One R sample per calendar month
    for month_no in range(1, 13):
        monthly = sm_df[sm_df['month'] == month_no]
        corr_df.loc[month_no - 1] = monthly.corr()['mean']

    plt.title("Correlation of the Measure computed by each Month {}".format(station))
    print(corr_df)
    sns.boxplot(corr_df)
    plt.ylabel("R")
    plt.ylim(-1, 1)
    plt.show()


# Average records from stations that fall inside the same pixel
def read_data_by_pixel(excel_path):
    """Average Yanco (Y1-Y14) station records sharing the same pixel.

    Rows with identical 'date' and 'cci' values are assumed to lie in the
    same coarse pixel and are averaged column-wise.

    Parameters
    ----------
    excel_path : str
        CSV with at least 'station', 'date' and 'cci' columns.

    Returns
    -------
    pandas.DataFrame
        The averaged frame (also printed).
    """
    data_df = pd.read_csv(excel_path)
    # isin replaces the original 14-way chained '==' / '|' filter
    yanco_stations = ['Y{}'.format(i) for i in range(1, 15)]
    data_df = data_df[data_df['station'].isin(yanco_stations)]
    # numeric_only=True: the string 'station' column would make mean() raise
    # in pandas >= 2.0 (older pandas silently dropped it).
    data_df = data_df.groupby(['date', 'cci']).mean(numeric_only=True).reset_index()
    print(data_df)
    return data_df


# Sample a land-cover raster at each record's lon/lat and write it back into the CSV
def read_landcover(landcover_dir, excel_path, col_name):
    """Sample a land-cover raster at every record's coordinates.

    For each row in the CSV, converts (lon, lat) to pixel indices via
    ``coord2pixel`` and stores the raster value in ``col_name``. The CSV
    is rewritten in place.

    Parameters
    ----------
    landcover_dir : str
        Path to the land-cover GeoTIFF (assumed single-band 2D raster —
        TODO confirm; a multi-band file would make ReadAsArray 3D).
    excel_path : str
        CSV with 'lon' and 'lat' columns; updated in place.
    col_name : str
        Column to receive the sampled land-cover value.
    """
    data_df = pd.read_csv(excel_path)
    landcover = gdal.Open(landcover_dir)
    landcover_data = landcover.ReadAsArray()
    landcover_geo = landcover.GetGeoTransform()
    n_rows, n_cols = landcover_data.shape[0], landcover_data.shape[1]
    for index, row in tqdm(data_df.iterrows(), total=len(data_df)):
        lon = row["lon"]
        lat = row["lat"]
        # px_row renamed from 'row' to avoid shadowing the iterrows variable
        px_col, px_row = coord2pixel(landcover_geo, lon, lat)
        # Explicit bounds check replaces the bare 'except: continue'. It also
        # fixes a silent bug: negative indices wrap around in numpy and would
        # sample the wrong raster cell instead of being skipped.
        if 0 <= px_row < n_rows and 0 <= px_col < n_cols:
            landcover_value = landcover_data[px_row, px_col]
            # Negative raster values are treated as nodata
            if landcover_value >= 0:
                data_df.loc[index, col_name] = landcover_value
    print(data_df)
    data_df.to_csv(excel_path, index=False)
    print("写入完成")


# Time-series bar chart (precipitation) plus curves (SM / EVI) of the downscaling result for one station
def plot_time_series_by_station(excel_path, station):
    """Plot a 2015 daily time series for one station.

    Draws daily precipitation as bars on the left axis and EVI, CCI SM,
    in-situ SM and downscaled SM as lines/markers on a twin right axis,
    with day-of-year labels on the x axis.

    Parameters
    ----------
    excel_path : str
        CSV with 'year', 'station', 'date' (yyyymmdd), 'CMORPH', 'evi',
        'lai', 'cci', 'mean' and 'evi_downscal' columns.
    station : str
        Station identifier to plot, e.g. 'K10'.
    """
    data_df = pd.read_csv(excel_path)
    data_df = data_df[data_df['year'] == 2015]
    data_df = data_df[data_df['station'] == station]
    # Keep only rows where EVI is available
    data_df = data_df.dropna(axis=0, subset=['evi'])
    # data_df = data_df.dropna(axis=0, how='any')

    date_list = data_df['date'].to_numpy()
    julian_day_list = []
    for date in date_list:
        # Convert yyyymmdd to zero-padded day-of-year for the x-axis labels
        targetday = datetime.date(int(str(date)[0:4]), int(str(date)[4:6]), int(str(date)[6:8]))
        day = targetday - datetime.date(targetday.year - 1, 12, 31)
        julian_day = str(day.days).rjust(3, '0')
        julian_day_list.append(julian_day)
    # print(julian_day_list)

    # daily precipitation amount
    dail_precipitation_amount = data_df['CMORPH'].to_numpy()
    # EVI
    evi = data_df['evi'].to_numpy()
    # LAI — NOTE(review): loaded but never plotted below
    lai = data_df['lai'].to_numpy()
    # CCI coarse-resolution SM
    cci = data_df['cci'].to_numpy()
    # in-situ measured SM
    sm = data_df['mean'].to_numpy()
    # downscaled SM result
    sm_scaled = data_df['evi_downscal'].to_numpy()

    fig, ax1 = plt.subplots()
    fig.set_size_inches(15, 6)
    plt.subplots_adjust(left=0.06, right=0.94, top=0.9, bottom=0.2)
    plt.tick_params(top='in', right='in', which='both')

    # Precipitation bars on the left axis
    ax1.bar(julian_day_list, dail_precipitation_amount, color='deepskyblue', label='Daily precipitation amount', alpha=0.6)

    # SM / EVI curves share the x axis on a twin right axis
    ax2 = ax1.twinx()
    ax2.plot(julian_day_list, evi, color='green', marker='^', markerfacecolor='none', label='EVI')
    ax2.plot(julian_day_list, cci, color='black', marker='.', markersize=10, label='CCI SM', linestyle='')
    ax2.plot(julian_day_list, sm, color='blue', marker='s', markerfacecolor='none', label='In-situ SM measurement', linestyle='-')
    ax2.plot(julian_day_list, sm_scaled, color='red', marker='.', markersize=10, label='Downscaled SM', linestyle='')

    # plt.title('{}'.format(station))
    ax1.grid(True, alpha=0.5, axis='both')

    # Merge the legends of both axes into a single legend below the plot
    lines_1, labels_1 = ax1.get_legend_handles_labels()
    lines_2, labels_2 = ax2.get_legend_handles_labels()
    lines = lines_1 + lines_2
    labels = labels_1 + labels_2
    ax1.legend(lines, labels, loc='center', bbox_to_anchor=(0.5, -0.15), fancybox=True, ncol=5)
    # ax1.set_xlabel('DOY (2015)')
    # Station label in the upper-right corner of the axes
    plt.text(len(julian_day_list) - 2, 0.9, '{}'.format(station), fontdict=dict(fontsize=18,  fontweight='bold', family='SimHei'))
    ax1.set_xticks(np.arange(0, len(julian_day_list)), julian_day_list, rotation=45)
    ax2.set_ylabel('SM (m³/m³) and EVI')
    ax2.set_ylim(0, 1)
    ax2.set_yticks(np.arange(0, 1.1, 0.2))
    ax1.set_ylabel('Precipitation(mm)')
    ax1.set_ylim(0, 56)
    ax1.set_yticks(np.arange(0, 57, 7))
    ax1.set_xlim(-1, len(julian_day_list))
    plt.tight_layout()
    plt.show()






if __name__ == '__main__':
    # Script entry point: most processing steps are kept here commented-out
    # as a history of the workflow; only the final plotting call is active.
    stm_dir = r"G:\test\SMAP_NEW\Data_separate_files_header_20140101_20141231_10684_MgjP_20231106\OZNET"
    # get_OZNET_data(stm_dir)
    oznet_data_path = r"G:\test\SMAP_NEW\OZNET_data.csv"

    # tif_dir = r"G:\test\SMAP_NEW\srtm_dem.tif"
    # tif_dir = r"G:\test\SMAP_NEW\landcover_2015.tif"
    # tif_dir = r"G:\test\SMAP_NEW\CMORPH_tif"
    # read_tif_to_excel(tif_dir, oznet_data_path, 'CMORPH')

    # for name in ['evi', 'msavi', 'ndvi', 'nirv', 'fpar', 'lai']:
    #     # SM_dir = r"G:\test\SMAP_NEW\2015_downscal_avdi\{}".format(name)
    #     # read_tif_to_excel(SM_dir, oznet_data_path, '{}'.format(name + '_downscal'))
    #     # SM_dir = r"G:\test\SMAP_NEW\2015_avdi\{}".format(name)
    #     # read_tif_to_excel(SM_dir, oznet_data_path, '{}'.format(name + '_avdi'))
    #     SM_dir = r"G:\test\SMAP_NEW\2015\{}".format(name)
    #     read_tif_to_excel(SM_dir, oznet_data_path, '{}'.format(name))
    # corr_analys(oznet_data_path)
    # corr_measure_smap(oznet_data_path, 'mean', 'cci')
    # corr_measure_smap_dem_land(oznet_data_path, 'mean', 'lai_downscal')
    # corr_measure_smap_dem_land_with_station(oznet_data_path, 'mean', 'lai_downscal')
    # corr_downscal_with_station(oznet_data_path, 'mean', 'lai_downscal', 'station')
    # for type in ['evi', 'msavi', 'ndvi', 'nirv', 'fpar', 'lai']:
    #     corr_measure_smap(oznet_data_path, 'mean', type + "_downscal")
    # corr_measure_smap(oznet_data_path, 'mean',type)
    # R_boxplot_with_dem_and_land(oznet_data_path)
    # R_boxplot(oznet_data_path, plot_type='month')
    # R_boxplot_with_dem_and_land(oznet_data_path)
    # for station in ['Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6', 'Y7', 'Y8', 'Y9', 'Y10', 'Y11', 'Y12', 'Y13', 'K10', 'K11', 'K12', 'K13', 'K14']:
    #     R_boxplot_by_station(oznet_data_path, station)

    # mcd12q1_dir = r"G:\test\SMAP_NEW\svtm_nsw_extant_pct_vc1_1_m1_1\SVTM_NSW_Extant_vC1_1_M1_1_5m_clip.tif"
    # read_landcover(mcd12q1_dir, oznet_data_path, 'landcover_SVTM')

    # Station coordinates for reference: Y5 (146.2932, -34.7284), Y8 (146.414, -34.847),
    # Y6 (145.8669, -34.8426), Y7 (146.1153, -34.8518), K10 (147.5348, -35.324), K14 (147.4974, -35.1249)
    plot_time_series_by_station(oznet_data_path, 'K10')

