"""
使用ATI+VI进行尝试降尺度，采用特征空间+多项式拟合
"""
import datetime
import math
import os.path

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from osgeo import gdal
from osgeo.gdalconst import GDT_Float32
from scipy.optimize import curve_fit


def write_tif(file_path, data, geotrans, projection, nodata, gdal_type):
    """Write a 2-D array to a single-band GeoTIFF.

    :param file_path: output file path
    :param data: 2-D array (rows x cols)
    :param geotrans: GDAL affine geotransform (6-element sequence)
    :param projection: projection as a WKT string
    :param nodata: nodata value recorded on the band
    :param gdal_type: GDAL data type constant (e.g. GDT_Float32)
    """
    driver = gdal.GetDriverByName("GTiff")
    rows, cols = data.shape
    dataset = driver.Create(file_path, cols, rows, 1, gdal_type)
    dataset.SetGeoTransform(geotrans)
    dataset.SetProjection(projection)
    band = dataset.GetRasterBand(1)
    band.WriteArray(data)
    band.SetNoDataValue(nodata)
    dataset.FlushCache()
    # BUGFIX: dereference the band/dataset handles so GDAL closes the file
    # deterministically instead of waiting for garbage collection.
    band = None
    dataset = None
    print("写入成功：{}".format(file_path))


def mean_filter(image, kernel_size=3):
    """NaN-aware mean (box) filter.

    Border pixels that the kernel cannot fully cover are left at zero.

    :param image: 2-D array to smooth
    :param kernel_size: odd window size (default 3)
    :return: filtered array with the same shape as ``image``
    """
    rows, cols = image.shape
    smoothed = np.zeros((rows, cols))
    radius = kernel_size // 2

    for y in range(radius, rows - radius):
        for x in range(radius, cols - radius):
            window = image[y - radius:y + radius + 1, x - radius:x + radius + 1]
            smoothed[y, x] = np.nanmean(window)
    return smoothed


# Solar correction factor
def get_sun_correction_factor(phi, nd):
    """Compute the solar correction factor C.

    C = sinφ·sinδ·(1 - tan²φ·tan²δ)^0.5 + cosφ·cosδ·arccos(-tanφ·tanδ)
    where δ is the solar declination from the Fourier series commonly
    attributed to Spencer (1971) and Γ = 2π(nd - 1)/365.25 is the day angle.

    BUGFIX: removed the leftover debug ``print`` calls that dumped the day
    angle and declination to stdout on every invocation.

    :param phi: latitude in radians (scalar or ndarray)
    :param nd: day of year (1-based)
    :return: correction factor C, same shape as ``phi``
    """
    gamma = (2 * np.pi * (nd - 1)) / 365.25
    delta = 0.006918 - 0.399912 * np.cos(gamma) + 0.070257 * np.sin(gamma) \
            - 0.006758 * np.cos(2 * gamma) + 0.000907 * np.sin(2 * gamma) \
            - 0.002697 * np.cos(3 * gamma) + 0.00148 * np.sin(3 * gamma)
    # Alternative declination formula (kept for reference):
    # delta = np.arcsin(0.39795 * np.cos(0.98563 * (nd - 173)))
    C = np.sin(phi) * np.sin(delta) * (1 - np.tan(phi) ** 2 * np.tan(delta) ** 2) ** 0.5 + \
        np.cos(phi) * np.cos(delta) * np.arccos(-np.tan(phi) * np.tan(delta))
    return C


# Compute ATI (apparent thermal inertia)
def get_ati(day_lst, night_lst, albedo, doy, geo):
    """Compute apparent thermal inertia: ATI = C * (1 - albedo) / ΔLST.

    Note: the input arrays are modified in place (invalid values → NaN),
    matching the original behavior.

    :param day_lst: daytime LST array (values <= 0 treated as nodata)
    :param night_lst: nighttime LST array (values <= 0 treated as nodata)
    :param albedo: albedo array; assumes a 1e-4 scale factor — TODO confirm
        against the albedo product actually used
    :param doy: day of year for the solar correction
    :param geo: GDAL geotransform of the rasters
    :return: filtered ATI array, values outside [0, 1] set to NaN
    """
    # Screen out nodata and physically impossible values
    day_lst[day_lst <= 0] = np.nan
    night_lst[night_lst <= 0] = np.nan
    albedo[albedo <= 0] = np.nan
    albedo[albedo == -9999] = np.nan

    # Per-pixel latitude from the geotransform.
    # BUGFIX: GDAL convention is Ygeo = geo[3] + geo[4]*col + geo[5]*row;
    # the original used geo[4]*row + geo[5]*col (row/col swapped), which made
    # latitude vary along columns. Also vectorized (was a per-pixel loop).
    rows = np.arange(day_lst.shape[0])
    cols = np.arange(day_lst.shape[1])
    col_grid, row_grid = np.meshgrid(cols, rows)
    lat_arr = np.radians(geo[3] + geo[4] * col_grid + geo[5] * row_grid).astype(np.float32)

    C = get_sun_correction_factor(lat_arr, doy)

    # ATI; albedo is stored with a 1e-4 scale factor
    ati = C * (1 - albedo * 0.0001) / (day_lst - night_lst)
    # 3x3 mean filtering
    ati = mean_filter(ati, 3)
    # Drop out-of-range values
    ati[ati > 1] = np.nan
    ati[ati < 0] = np.nan
    return ati


# Build the feature space (dry/wet edge fitting)
def feature_space(X, Y):
    """
    :param X: independent variable (e.g. a vegetation index)
    :param Y: dependent variable (e.g. ATI)
    :return: feature-space fit coefficients (a, b, c, d); the dry edge is
             y = a*x + b and the wet edge is y = c*x + d
    """
    # Generic outlier screening; ideally the inputs are cleaned before the call
    X[np.isinf(X)] = np.nan
    Y[np.isinf(Y)] = np.nan
    # Normalization (disabled)
    # X = (X - np.nanmean(X)) / (np.nanmax(X) - np.nanmin(X))
    # Y = (Y - np.nanmean(Y)) / (np.nanmax(Y) - np.nanmin(Y))
    # Step 1: split the X range into M intervals (suggested M>=20), each with N sub-intervals (suggested N>=5)
    M = 20
    N = 5
    # Top-level interval edges over the X value range
    x_interval = np.linspace(np.nanmin(X), np.nanmax(X), M + 1, endpoint=True)
    # print(x_interval)
    # Step 2: for each interval, record the highest / lowest Y of every sub-interval
    y_max_list = []
    y_min_list = []
    # X nodes (interval left edges) that produced a usable value
    x_max_list = []
    x_min_list = []
    for i in range(M):
        # print("计算区间{}".format(i + 1))
        # Sub-interval edges inside interval i
        x_sub_interval = np.linspace(x_interval[i], x_interval[i + 1], N + 1, endpoint=True)
        # print(x_sub_interval)
        # Extremes collected from the sub-intervals
        y_sub_max_list = []
        y_sub_min_list = []
        for j in range(N):
            x_sub_interval_left = x_sub_interval[j]
            x_sub_interval_right = x_sub_interval[j + 1]
            # Y values whose X falls inside this sub-interval
            y_sub_interval_value = Y[np.where((X >= x_sub_interval_left) & (X < x_sub_interval_right))]
            # print(y_sub_interval_value)
            if y_sub_interval_value.size != 0:
                y_sub_max = np.nanmax(y_sub_interval_value)
                y_sub_min = np.nanmin(y_sub_interval_value)
                if ~np.isnan(y_sub_max):
                    y_sub_max_list.append(y_sub_max)
                if ~np.isnan(y_sub_min):
                    y_sub_min_list.append(y_sub_min)
        # Skip this interval entirely when it collected no values
        # print("y_sub_max_list: ", y_sub_max_list)
        if y_sub_max_list:
            need_remove = True
            y_max_mean = np.nan
            # Step 3: iteratively cull the sub-interval maxima by standard deviation
            while need_remove:
                y_max_mean = np.nanmean(y_sub_max_list)
                # Stop the deviation-based culling once two or fewer values remain
                if len(y_sub_max_list) > 2:
                    for j in range(len(y_sub_max_list)):
                        y_max_std = np.nanstd(y_sub_max_list)
                        # Step 4: drop a sub-interval whose maximum lies below mean - std
                        # TODO(review): debatable — this culls LOW outliers from the max edge
                        # (safe despite mutating the list: we break right after the removal)
                        if y_sub_max_list[j] < (y_max_mean - y_max_std) and ~np.isnan(y_sub_max_list[j]):
                            y_sub_max_list.remove(y_sub_max_list[j])
                            need_remove = True
                            break
                        else:
                            need_remove = False
                else:
                    break
            if ~np.isnan(y_max_mean):
                y_max_list.append(y_max_mean)
                x_max_list.append(x_interval[i])
        # print("y_sub_min_list: ", y_sub_min_list)
        if y_sub_min_list:
            need_remove = True
            y_min_mean = np.nan
            # Step 3 (wet edge): same iterative deviation-based culling
            while need_remove:
                # print("y_sub_min_list: ", y_sub_min_list)
                y_min_mean = np.nanmean(y_sub_min_list)
                # Stop the deviation-based culling once two or fewer values remain
                if len(y_sub_min_list) > 2:
                    for j in range(len(y_sub_min_list)):
                        y_min_std = np.nanstd(y_sub_min_list)
                        # Step 4: drop a sub-interval whose minimum lies below mean - std
                        # TODO(review): debatable — kept from the original
                        if y_sub_min_list[j] < (y_min_mean - y_min_std) and ~np.isnan(y_sub_min_list[j]):
                            y_sub_min_list.remove(y_sub_min_list[j])
                            need_remove = True
                            break
                        else:
                            need_remove = False
                else:
                    break
            if ~np.isnan(y_min_mean):
                y_min_list.append(y_min_mean)
                x_min_list.append(x_interval[i])
    # print(x_min_list)
    # print(y_min_list)
    # print(x_max_list)
    # print(y_max_list)
    # TODO: isolated abnormal maxima may remain when a whole sub-interval is bad;
    # they could be smoothed/filtered out before fitting

    # Step 6: if the remaining sub-interval count and std still exceed thresholds,
    # return to step 4 and repeat 4-6; otherwise continue to step 7
    # (the threshold used here is effectively "more than 2 elements")
    # Step 7: use the mean as the interval's extreme and repeat from step 2 until
    # all M intervals are processed (done in the loop above)
    # Step 8: linear regression plus RMSE computation.
    a, b, c, d = 0, 0, 0, 0
    # Fit the dry edge
    need_fit = True
    while need_fit:
        y_max_arr = np.array(y_max_list)
        x_max_arr = np.array(x_max_list)
        popt = np.polyfit(x_max_arr, y_max_arr, 1)
        a = popt[0]
        b = popt[1]
        # print(a, b)
        y_max_fit = a * x_max_arr + b
        y_max_fit_rmse = np.sqrt(np.mean((y_max_arr - y_max_fit) ** 2))
        if len(y_max_list) > 2:
            for i in range(len(y_max_list)):
                # Step 9: drop an interval lying more than 2*RMSE below the regression
                # line, then refit; repeat until the minimum interval count is reached
                # or nothing more can be dropped
                if y_max_list[i] < y_max_fit[i] - y_max_fit_rmse * 2:
                    y_max_list.remove(y_max_list[i])
                    x_max_list.remove(x_max_list[i])
                    need_fit = True
                    break
                else:
                    need_fit = False
        else:
            break

    # Fit the wet edge
    need_fit = True
    while need_fit:
        y_min_arr = np.array(y_min_list)
        x_min_arr = np.array(x_min_list)
        popt = np.polyfit(x_min_arr, y_min_arr, 1)
        c = popt[0]
        d = popt[1]
        # print(c, d)
        y_min_fit = c * x_min_arr + d
        y_min_fit_rmse = np.sqrt(np.mean((y_min_arr - y_min_fit) ** 2))
        if len(y_min_list) > 2:
            for i in range(len(y_min_list)):
                # Step 9 (wet edge): same 2*RMSE rejection rule as above
                if y_min_list[i] < y_min_fit[i] - y_min_fit_rmse * 2:
                    y_min_list.remove(y_min_list[i])
                    x_min_list.remove(x_min_list[i])
                    need_fit = True
                    break
                else:
                    need_fit = False
        else:
            break

    # Step 10: the final regressions above give the dry edge; optional plotting:
    # y_max_interval = a * x_interval + b
    # y_min_interval = c * x_interval + d
    # plt.title("NDVI-ATI")
    # plt.scatter(X.flatten(), Y.flatten(), c='r', marker='o')
    # plt.plot(x_interval, y_max_interval, c='b', label='max')
    # plt.plot(x_interval, y_min_interval, c='g', label='min')
    # plt.xlabel("NDVI")
    # plt.ylabel("ATI")
    # plt.legend()
    # plt.show()
    return a, b, c, d


def feature_space_version2(X, Y):
    """Feature-space (dry/wet edge) fit, vectorized culling variant.

    :param X: independent variable (e.g. a vegetation index)
    :param Y: dependent variable (e.g. ATI)
    :return: (a, b, c, d) — dry edge y = a*x + b, wet edge y = c*x + d
    """
    # Generic outlier screening; ideally inputs are cleaned beforehand
    X[np.isinf(X)] = np.nan
    Y[np.isinf(Y)] = np.nan
    # Step 1: split the X range into M intervals, each with N sub-intervals
    M = 25
    N = 5
    # Top-level interval edges over the X value range
    x_interval = np.linspace(np.nanmin(X), np.nanmax(X), M + 1, endpoint=True)
    # Step 2: per interval, collect the extreme Y of every sub-interval
    y_max_list = []
    y_min_list = []
    # X nodes (interval left edges) that produced a usable value
    x_max_list = []
    x_min_list = []
    for i in range(M):
        x_sub_interval = np.linspace(x_interval[i], x_interval[i + 1], N + 1, endpoint=True)
        y_sub_max_list = []
        y_sub_min_list = []
        for j in range(N):
            x_sub_interval_left = x_sub_interval[j]
            x_sub_interval_right = x_sub_interval[j + 1]
            # Y values whose X falls inside this sub-interval
            y_sub_interval_value = Y[np.where((X >= x_sub_interval_left) & (X < x_sub_interval_right))]
            if y_sub_interval_value.size != 0:
                y_sub_max = np.nanmax(y_sub_interval_value)
                y_sub_min = np.nanmin(y_sub_interval_value)
                if ~np.isnan(y_sub_max):
                    y_sub_max_list.append(y_sub_max)
                if ~np.isnan(y_sub_min):
                    y_sub_min_list.append(y_sub_min)
        y_sub_max_list = np.array(y_sub_max_list)
        y_sub_min_list = np.array(y_sub_min_list)
        if y_sub_max_list.shape[0] > 0:
            y_max_mean = np.nanmean(y_sub_max_list)
            y_max_std = np.nanstd(y_sub_max_list)
            # BUGFIX: keep the values at or above mean - std. The original kept
            # only values BELOW mean - std, i.e. it retained just the low
            # outliers and discarded the bulk of the sub-interval maxima
            # (feature_space() removes the values below mean - std instead).
            y_sub_max_list = y_sub_max_list[np.where(y_sub_max_list >= y_max_mean - y_max_std)]
            y_max_mean = np.nanmean(y_sub_max_list)
            if ~np.isnan(y_max_mean):
                y_max_list.append(y_max_mean)
                x_max_list.append(x_interval[i])
        if y_sub_min_list.shape[0] > 0:
            y_min_mean = np.nanmean(y_sub_min_list)
            y_min_std = np.nanstd(y_sub_min_list)
            # BUGFIX: same inverted condition, fixed for the wet edge
            y_sub_min_list = y_sub_min_list[np.where(y_sub_min_list >= y_min_mean - y_min_std)]
            y_min_mean = np.nanmean(y_sub_min_list)
            if ~np.isnan(y_min_mean):
                y_min_list.append(y_min_mean)
                x_min_list.append(x_interval[i])

    # Dry-edge linear fit
    y_max_arr = np.array(y_max_list)
    x_max_arr = np.array(x_max_list)
    popt = np.polyfit(x_max_arr, y_max_arr, 1)
    a = popt[0]
    b = popt[1]
    # Wet-edge linear fit
    y_min_arr = np.array(y_min_list)
    x_min_arr = np.array(x_min_list)
    popt = np.polyfit(x_min_arr, y_min_arr, 1)
    c = popt[0]
    d = popt[1]
    return a, b, c, d


def keep_increasing(x_lst, y_lst):
    """Greedily keep the strictly increasing subsequence of ``y_lst``.

    Walks ``y_lst`` from the start and keeps only elements strictly greater
    than the last kept one; the matching x values are kept in parallel.

    :param x_lst: x values, same length as ``y_lst``
    :param y_lst: y values
    :return: (filtered_x, filtered_y)
    """
    # BUGFIX: the original raised IndexError on empty input
    if not y_lst:
        return [], []
    result_y = [y_lst[0]]  # the first element is always kept
    result_x = [x_lst[0]]
    for i in range(1, len(y_lst)):
        # keep an element only when it exceeds the last kept value
        if y_lst[i] > result_y[-1]:
            result_y.append(y_lst[i])
            result_x.append(x_lst[i])
    return result_x, result_y


def feature_space_version3(X, Y, vi_name):
    """
    :param X: independent variable (vegetation index)
    :param Y: dependent variable (e.g. ATI)
    :param vi_name: vegetation-index name selecting a preset X range
                    ("evi", "ndvi", "msavi", "nirv", "fpar", "lai");
                    anything else uses the full data range
    :return: feature-space fit coefficients (a, b, c, d); dry edge
             y = a*x + b, wet edge y = c*x + d
    """
    # Generic outlier screening; ideally inputs are cleaned beforehand
    X[np.isinf(X)] = np.nan
    Y[np.isinf(Y)] = np.nan
    # Normalization
    M = 30
    N = 5
    # Top-level intervals; adjust here to choose a left/right side of the range
    # (upright vs inverted triangle of the feature space)
    # if np.nanmax(X) > 1:
    #     x_interval = np.linspace(np.nanmin(X) + 0.5, np.nanmax(X) - 0.5, M + 1, endpoint=True)
    # else:
    #     # x_interval = np.linspace(np.nanmin(X) + 0.05, np.nanmax(X) - 0.05, M + 1, endpoint=True)
    #     x_interval = np.linspace(np.nanmin(X), np.nanmax(X) - 0.05, M + 1, endpoint=True)
    # Preset interval range per vegetation index
    # NOTE(review): empirical, study-area-specific bounds — confirm before reuse
    if vi_name == "evi":
        x_interval = np.linspace(0.08, 0.3, M + 1, endpoint=True)
    elif vi_name == "ndvi":
        x_interval = np.linspace(0.2, 0.6, M + 1, endpoint=True)
    elif vi_name == "msavi":
        x_interval = np.linspace(0.07, 0.25, M + 1, endpoint=True)
    elif vi_name == "nirv":
        x_interval = np.linspace(0.02, 0.12, M + 1, endpoint=True)
    elif vi_name == "fpar":
        x_interval = np.linspace(0.1, 0.5, M + 1, endpoint=True)
    elif vi_name == "lai":
        x_interval = np.linspace(0.1, 1, M + 1, endpoint=True)
    else:
        x_interval = np.linspace(np.nanmin(X), np.nanmax(X), M + 1, endpoint=True)
    # Step 2: per interval, record the extreme Y of every sub-interval
    y_max_list = []
    y_min_list = []
    # X nodes (interval left edges) that produced a usable value
    x_max_list = []
    x_min_list = []
    for i in range(M):
        # print("计算区间{}".format(i + 1))
        # Sub-interval edges inside interval i
        x_sub_interval = np.linspace(x_interval[i], x_interval[i + 1], N + 1, endpoint=True)
        # print(x_sub_interval)
        # Extremes collected from the sub-intervals
        y_sub_max_list = []
        y_sub_min_list = []
        for j in range(N):
            x_sub_interval_left = x_sub_interval[j]
            x_sub_interval_right = x_sub_interval[j + 1]
            # Y values whose X falls inside this sub-interval
            y_sub_interval_value = Y[np.where((X >= x_sub_interval_left) & (X < x_sub_interval_right))]
            # print(y_sub_interval_value)
            if y_sub_interval_value.size != 0:
                y_sub_max = np.nanmax(y_sub_interval_value)
                y_sub_min = np.nanmin(y_sub_interval_value)
                if ~np.isnan(y_sub_max):
                    y_sub_max_list.append(y_sub_max)
                if ~np.isnan(y_sub_min):
                    y_sub_min_list.append(y_sub_min)
        # Skip this interval entirely when it collected no values
        y_sub_max_list = np.array(y_sub_max_list)
        y_sub_min_list = np.array(y_sub_min_list)
        # print(y_sub_max_list.shape[0])
        if y_sub_max_list.shape[0] > 1:
            y_max_mean = np.nanmean(y_sub_max_list)
            y_max_std = np.nanstd(y_sub_max_list)
            # NOTE(review): keeps values in (mean - 2*std, mean] — this also
            # discards maxima ABOVE the mean; confirm that is intended
            y_sub_max_list = y_sub_max_list[np.where((y_max_mean - 2 * y_max_std < y_sub_max_list) & (y_sub_max_list <= y_max_mean))]
            y_max_mean = np.nanmean(y_sub_max_list)
            if ~np.isnan(y_max_mean):
                y_max_list.append(y_max_mean)
                x_max_list.append(x_interval[i])
        elif y_sub_max_list.shape[0] == 1:
            # A single value is used as-is
            y_max_list.append(y_sub_max_list[0])
            x_max_list.append(x_interval[i])
        if y_sub_min_list.shape[0] > 1:
            y_min_mean = np.nanmean(y_sub_min_list)
            y_min_std = np.nanstd(y_sub_min_list)
            # NOTE(review): same (mean - 2*std, mean] window for the wet edge
            y_sub_min_list = y_sub_min_list[np.where((y_min_mean - 2 * y_min_std < y_sub_min_list) & (y_sub_min_list <= y_min_mean))]
            y_min_mean = np.nanmean(y_sub_min_list)
            if ~np.isnan(y_min_mean):
                y_min_list.append(y_min_mean)
                x_min_list.append(x_interval[i])
        elif y_sub_min_list.shape[0] == 1:
            y_min_list.append(y_sub_min_list[0])
            x_min_list.append(x_interval[i])
    # x_max_list, y_max_list = keep_increasing(x_max_list, y_max_list)
    # Dry-edge linear fit
    y_max_arr = np.array(y_max_list)
    x_max_arr = np.array(x_max_list)
    # print("x_max_arr: ", x_max_arr)
    popt = np.polyfit(x_max_arr, y_max_arr, 1)
    a = popt[0]
    b = popt[1]
    # Wet-edge linear fit
    y_min_arr = np.array(y_min_list)
    x_min_arr = np.array(x_min_list)
    popt = np.polyfit(x_min_arr, y_min_arr, 1)
    c = popt[0]
    d = popt[1]

    # Step 10: the regressions above give the edges; optional plotting:
    # x_fit = np.linspace(np.nanmin(X), np.nanmax(X), M + 1, endpoint=True)
    # y_max_interval = a * x_fit + b
    # y_min_interval = c * x_fit + d
    # plt.title("VI-ATI")
    # plt.scatter(X.flatten(), Y.flatten(), c='r', marker='o', s=5)
    # plt.scatter(x_min_arr, y_min_arr, c='g', marker='1')
    # plt.scatter(x_max_arr, y_max_arr, c='b', marker='2')
    # plt.plot(x_fit, y_max_interval, c='b', label='max')
    # plt.plot(x_fit, y_min_interval, c='g', label='min')
    # plt.xlabel("VI")
    # plt.ylabel("ATI")
    # plt.legend()
    # plt.show()
    return a, b, c, d


def feature_space_version4(X, Y, vi_name):
    """Feature-space (dry/wet edge) fit with a shifted wet-edge range.

    The dry edge uses a per-index preset X range, iterative std-based
    culling and 2*RMSE screening of the linear fit; the wet edge is
    sampled over the shifted range [min(X)+0.2, max(X)-0.1].

    :param X: independent variable (vegetation index)
    :param Y: dependent variable (e.g. ATI)
    :param vi_name: vegetation-index name selecting the preset X range
    :return: (a, b, c, d) — dry edge y = a*x + b, wet edge y = c*x + d
    """
    # Generic outlier screening; ideally inputs are cleaned beforehand
    X[np.isinf(X)] = np.nan
    Y[np.isinf(Y)] = np.nan
    # Step 1: M top-level intervals (suggested M<=20), each with N sub-intervals
    M = 10
    N = 5
    # Preset upper bound of the X range per vegetation index
    # NOTE(review): empirical, study-area-specific bounds — confirm before reuse
    if vi_name == "evi":
        x_interval = np.linspace(np.nanmin(X), 0.4, M + 1, endpoint=True)
    elif vi_name == "ndvi":
        x_interval = np.linspace(np.nanmin(X), 0.7, M + 1, endpoint=True)
    elif vi_name == "msavi":
        x_interval = np.linspace(np.nanmin(X), 0.4, M + 1, endpoint=True)
    elif vi_name == "nirv":
        x_interval = np.linspace(np.nanmin(X), 0.2, M + 1, endpoint=True)
    elif vi_name == "fpar":
        x_interval = np.linspace(np.nanmin(X), 0.6, M + 1, endpoint=True)
    elif vi_name == "lai":
        x_interval = np.linspace(np.nanmin(X), 6, M + 1, endpoint=True)
    else:
        x_interval = np.linspace(np.nanmin(X), np.nanmax(X), M + 1, endpoint=True)
    # The wet edge is sampled over a shifted range, away from the extremes
    x_min_interval = np.linspace(np.nanmin(X) + 0.2, np.nanmax(X) - 0.1, M + 1, endpoint=True)
    # Step 2: per-interval collection of sub-interval extremes
    y_max_list = []
    y_min_list = []
    x_max_list = []
    x_min_list = []
    for i in range(M):
        x_sub_interval = np.linspace(x_interval[i], x_interval[i + 1], N + 1, endpoint=True)
        x_min_sub_interval = np.linspace(x_min_interval[i], x_min_interval[i + 1], N + 1, endpoint=True)
        y_sub_max_list = []
        y_sub_min_list = []
        for j in range(N):
            x_sub_interval_left = x_sub_interval[j]
            x_min_sub_interval_left = x_min_sub_interval[j]
            x_sub_interval_right = x_sub_interval[j + 1]
            x_min_sub_interval_right = x_min_sub_interval[j + 1]
            # Y values in this sub-interval (dry and wet ranges differ)
            y_sub_interval_value = Y[np.where((X >= x_sub_interval_left) & (X < x_sub_interval_right))]
            y_min_sub_interval_value = Y[np.where((X >= x_min_sub_interval_left) & (X < x_min_sub_interval_right))]
            if y_sub_interval_value.size != 0:
                y_sub_max = np.nanmax(y_sub_interval_value)
                if ~np.isnan(y_sub_max):
                    y_sub_max_list.append(y_sub_max)
            if y_min_sub_interval_value.size != 0:
                y_sub_min = np.nanmin(y_min_sub_interval_value)
                if ~np.isnan(y_sub_min):
                    y_sub_min_list.append(y_sub_min)
        y_sub_min_list = np.array(y_sub_min_list)
        if y_sub_max_list:
            need_remove = True
            y_max_mean = np.nan
            # Step 3: iterative std-based culling of the sub-interval maxima
            while need_remove:
                y_max_mean = np.nanmean(y_sub_max_list)
                # Stop culling once two or fewer values remain
                if len(y_sub_max_list) > 2:
                    for j in range(len(y_sub_max_list)):
                        y_max_std = np.nanstd(y_sub_max_list)
                        # Step 4: drop maxima lying below mean - std
                        # TODO(review): debatable rule, kept from the original
                        if y_sub_max_list[j] < (y_max_mean - y_max_std) and ~np.isnan(y_sub_max_list[j]):
                            y_sub_max_list.remove(y_sub_max_list[j])
                            need_remove = True
                            break
                        else:
                            need_remove = False
                else:
                    break
            if ~np.isnan(y_max_mean):
                y_max_list.append(y_max_mean)
                x_max_list.append(x_interval[i])
        if y_sub_min_list.shape[0] > 0:
            y_min_mean = np.nanmean(y_sub_min_list)
            y_min_std = np.nanstd(y_sub_min_list)
            # Keep wet-edge minima within (mean - 3*std, mean)
            y_sub_min_list = y_sub_min_list[np.where((y_min_mean - 3 * y_min_std < y_sub_min_list) & (y_sub_min_list < y_min_mean))]
            y_min_mean = np.nanmean(y_sub_min_list)
            if ~np.isnan(y_min_mean):
                y_min_list.append(y_min_mean)
                x_min_list.append(x_min_interval[i])

    a, b = 0, 0
    # Dry-edge fit with 2*RMSE point rejection
    need_fit = True
    while need_fit:
        # BUGFIX: rebuild the arrays on every iteration. The original built
        # y_max_arr/x_max_arr once BEFORE the loop, so after dropping a point
        # the regression was never refit and the list/array indices went out
        # of step (the sibling feature_space() rebuilds them inside the loop).
        y_max_arr = np.array(y_max_list)
        x_max_arr = np.array(x_max_list)
        popt = np.polyfit(x_max_arr, y_max_arr, 1)
        a = popt[0]
        b = popt[1]
        y_max_fit = a * x_max_arr + b
        y_max_fit_rmse = np.sqrt(np.mean((y_max_arr - y_max_fit) ** 2))
        if len(y_max_list) > 2:
            for i in range(len(y_max_list)):
                # Step 9: drop points more than 2*RMSE below the line and refit
                if y_max_list[i] < y_max_fit[i] - y_max_fit_rmse * 2:
                    y_max_list.remove(y_max_list[i])
                    x_max_list.remove(x_max_list[i])
                    need_fit = True
                    break
                else:
                    need_fit = False
        else:
            break
    # Wet-edge fit (single regression)
    y_min_arr = np.array(y_min_list)
    x_min_arr = np.array(x_min_list)
    popt = np.polyfit(x_min_arr, y_min_arr, 1)
    c = popt[0]
    d = popt[1]
    return a, b, c, d


# Compute AVDI
def get_avdi(ati, vi, a, b, c, d):
    """Compute AVDI from ATI and a vegetation index.

    The dry edge is ati_max = a*vi + b and the wet edge is ati_min = c*vi + d;
    AVDI measures the position of ATI between them, from the dry edge.
    Values outside [0, 1] are replaced with NaN.
    """
    dry_edge = a * vi + b
    wet_edge = c * vi + d
    index = (dry_edge - ati) / (dry_edge - wet_edge)
    out_of_range = (index < 0) | (index > 1)
    index[out_of_range] = np.nan
    return index


# Convert pixel row/column to geographic coordinates
def pixel2coord(geo_transform, col, row):
    """Return (lon, lat) of pixel (col, row) from an affine geotransform.

    Only the origin and pixel-size terms are used; the rotation terms of
    the geotransform are ignored.
    """
    lon = geo_transform[0] + geo_transform[1] * col
    lat = geo_transform[3] + geo_transform[5] * row
    return lon, lat


# Convert geographic coordinates to pixel row/column
def coord2pixel(geo_transform, lon, lat):
    """Return (col, row) of (lon, lat); truncates toward zero like int()."""
    x_origin, pixel_width = geo_transform[0], geo_transform[1]
    y_origin, pixel_height = geo_transform[3], geo_transform[5]
    col = int((lon - x_origin) / pixel_width)
    row = int((lat - y_origin) / pixel_height)
    return col, row


# Collect the high-resolution pixels covered by one coarse-resolution pixel
def get_high_res_pixels(high_res_data, high_res_geo_transform, low_res_geo_transform, y_low_res_pixel, x_low_res_pixel, low_res_to_high_res_ratio):
    """Return the block of high-res pixels under one low-res pixel.

    :param high_res_data: 2-D high-resolution array
    :param high_res_geo_transform: geotransform of the high-res raster
    :param low_res_geo_transform: geotransform of the low-res raster
    :param y_low_res_pixel: low-res row index
    :param x_low_res_pixel: low-res column index
    :param low_res_to_high_res_ratio: low/high pixel-size ratio (window size)
    :return: float32 sub-array, or an empty list [] when the window start
             falls outside the raster (np.nanmean([]) then yields NaN)
    """
    # Geographic coordinate of the low-res pixel (its upper-left corner)
    x_low_res_geo, y_low_res_geo = pixel2coord(low_res_geo_transform, x_low_res_pixel, y_low_res_pixel)
    # x_low_res_geo = x_low_res_geo + low_res_geo_transform[1] / 2  # optional shift to pixel center (x)
    # y_low_res_geo = y_low_res_geo + low_res_geo_transform[5] / 2  # optional shift to pixel center (y)

    # Corresponding high-res pixel indices
    # NOTE(review): the window below is centered on this corner-derived index,
    # so it straddles neighbouring coarse cells unless the center shift above
    # is enabled — confirm which alignment is intended
    high_res_x, high_res_y = coord2pixel(high_res_geo_transform, x_low_res_geo, y_low_res_geo)

    high_res_x_min = int(high_res_x - low_res_to_high_res_ratio / 2)
    high_res_x_max = int(high_res_x + low_res_to_high_res_ratio / 2)
    high_res_y_min = int(high_res_y - low_res_to_high_res_ratio / 2)
    high_res_y_max = int(high_res_y + low_res_to_high_res_ratio / 2)

    # Clamp windows that only partially overlap the raster edges
    if high_res_x_min < 0 and high_res_x_max > 0:
        high_res_x_min = 0
    if high_res_x_min < high_res_data.shape[1] < high_res_x_max:
        high_res_x_max = high_res_data.shape[1]
    if high_res_y_min < 0 and high_res_y_max > 0:
        high_res_y_min = 0
    if high_res_y_min < high_res_data.shape[0] < high_res_y_max:
        high_res_y_max = high_res_data.shape[0]

    # Slice only when the window start lies inside the raster; otherwise the
    # caller receives an empty list
    if 0 <= high_res_x_min <= high_res_data.shape[1] and 0 <= high_res_y_min <= high_res_data.shape[0]:
        high_res_pixels = high_res_data[high_res_y_min:high_res_y_max, high_res_x_min:high_res_x_max]
        return high_res_pixels.astype(np.float32)
    else:
        return []


# Downscaling driver using AVDI (arc-cosine weighting scheme)
def downscal_by_avdi(sm, avdi, sm_geo, avdi_geo, avdi_projection, output_path):
    """Downscale coarse soil moisture with AVDI and write a GeoTIFF.

    :param sm: coarse-resolution soil-moisture array
    :param avdi: high-resolution AVDI array
    :param sm_geo: geotransform of the coarse raster
    :param avdi_geo: geotransform of the AVDI raster
    :param avdi_projection: projection WKT used for the output
    :param output_path: output GeoTIFF path (-9999 nodata)
    """
    # Mean AVDI of the high-res pixels inside each coarse pixel
    avdi_mean_in_sm = np.zeros(sm.shape)
    res_ratio = int(sm_geo[1] / avdi_geo[1])
    # print(avdi_geo, sm_geo)
    for i in range(sm.shape[0]):
        for j in range(sm.shape[1]):
            # print(get_high_res_pixels(avdi, avdi_geo, sm_geo, i, j, res_ratio))
            avdi_mean_in_sm[i, j] = np.nanmean(get_high_res_pixels(avdi, avdi_geo, sm_geo, i, j, res_ratio))
    # Downscaled value for every high-res pixel
    sm_downscale = np.zeros(avdi.shape)
    for i in range(sm_downscale.shape[0]):
        for j in range(sm_downscale.shape[1]):
            lon, lat = pixel2coord(avdi_geo, j, i)
            sm_col, sm_row = coord2pixel(sm_geo, lon, lat)
            sm_orign = sm[sm_row, sm_col]
            avdi_mean = avdi_mean_in_sm[sm_row, sm_col]
            # Arc-cosine weighting terms.
            # NOTE(review): math.sqrt raises ValueError if avdi falls outside
            # [0, 1], and b == 0 (avdi exactly 0 or 1) raises ZeroDivisionError;
            # NaNs propagate silently — confirm inputs are pre-screened
            a = 2 * (sm_orign / math.acos(1 - 2 * (1 - avdi_mean)))
            b = math.sqrt(1 - (1 - 2 * (1 - avdi[i, j])) ** 2)
            c = (1 - avdi[i, j]) - (1 - avdi_mean)
            sm_downscale[i, j] = sm_orign + a / b * c
            # print("原始数据：", sm_orign,  "avdi: ", avdi[i, j], "平均值：", avdi_mean, "降尺度结果：", sm_downscale[i, j])
    # Out-of-range results become nodata
    sm_downscale[(sm_downscale < 0) | (sm_downscale > 1)] = -9999
    write_tif(output_path, sm_downscale, avdi_geo, avdi_projection, -9999, GDT_Float32)


def downscal_by_weight(sm, avdi, sm_geo, avdi_geo, atdi_projection, output_path):
    """Downscale soil moisture by simple AVDI weighting.

    Every high-resolution pixel receives the coarse SM value scaled by the
    ratio of its own (1 - AVDI) to the coarse-cell mean of (1 - AVDI).
    Results outside [0, 1] are written as -9999 (nodata).
    """
    ratio = int(sm_geo[1] / avdi_geo[1])
    result = np.zeros(avdi.shape)
    rows, cols = result.shape
    for r in range(rows):
        for c in range(cols):
            # Locate the coarse pixel that contains this high-res pixel
            lon, lat = pixel2coord(avdi_geo, c, r)
            sm_col, sm_row = coord2pixel(sm_geo, lon, lat)
            coarse_sm = sm[sm_row, sm_col]
            # Mean AVDI of the high-res window under that coarse pixel
            window = get_high_res_pixels(avdi, avdi_geo, sm_geo, sm_row, sm_col, ratio)
            window_mean = np.nanmean(window)
            result[r, c] = coarse_sm * ((1 - avdi[r, c]) / (1 - window_mean))
    result[(result < 0) | (result > 1)] = -9999
    write_tif(output_path, result, avdi_geo, atdi_projection, -9999, GDT_Float32)


# Empirical fitting formula
def fit_formula(X, a, b, c, d, e, f):
    """Empirical soil-moisture model used as the curve_fit target.

    ``X`` is an (n, 2) array whose columns are (ati, evi); the result is a
    single scalar built from NaN-aware means over all samples.
    """
    ati = X.T[0]
    evi = X.T[1]
    weight = np.power(d, (evi + e))
    sm = a * np.nanmean(weight) + b * np.nanmean(np.log(ati + c) * weight) + f
    return sm


# Downscaling formula
def down_scaling_naqu(X, a, b, c, d, e, f):
    """Per-sample downscaling model: sm = (a + b*ln(ati + c)) * d**(evi + e) + f.

    ``X`` is an (n, 2) array whose columns are (ati, evi).
    """
    ati = X.T[0]
    evi = X.T[1]
    return (a + b * np.log(ati + c)) * np.power(d, (evi + e)) + f


# Downscaling using the Naqu-region method (empirical ATI/VI model)
def downscal_by_naqu(sm, ati, vi, sm_geo, vi_geo, vi_projection, output_path):
    """Fit the empirical model on coarse pixels, apply it per high-res pixel
    and write the result to a GeoTIFF.

    :param sm: coarse soil-moisture array (valid values expected in [0, 1])
    :param ati: high-resolution ATI array
    :param vi: high-resolution vegetation-index array
    :param sm_geo: geotransform of the coarse raster
    :param vi_geo: geotransform of the high-res rasters
    :param vi_projection: projection WKT used for the output
    :param output_path: output GeoTIFF path (-9999 nodata)
    """
    res_ratio = int(sm_geo[1] / vi_geo[1])
    ati_in_sm = []
    vi_in_sm = []
    sm_data = []
    # print(ati_in_sm)
    # For every coarse pixel, gather all high-res ATI/VI samples under it
    for row in range(0, sm.shape[0]):
        for col in range(0, sm.shape[1]):
            lon, lat = pixel2coord(sm_geo, col, row)
            # NOTE(review): hard-coded study-area bounding box
            # (lon 145.5..148, lat -35.5..-34.5) — parameterize before reuse
            if 145.5 <= lon <= 148 and -35.5 <= lat <= -34.5 and 0 <= sm[row, col] <= 1:
                # print(lon, lat)
                # print("正在处理第", row, col, "个像元")
                # print(sm_geo)
                ati_in_sm.append(get_high_res_pixels(ati, vi_geo, sm_geo, row, col, res_ratio))
                vi_in_sm.append(get_high_res_pixels(vi, vi_geo, sm_geo, row, col, res_ratio))
                sm_data.append(sm[row, col])
    # curve_fit needs more samples than the six model parameters
    if len(sm_data) < 6:
        print("数据量太少，无法进行降尺度处理")
        return
    # ati_in_sm = ati_in_sm[(sm_data >= 0) & (sm_data <= 1)]
    # vi_in_sm = vi_in_sm[(sm_data >= 0) & (sm_data <= 1)]
    # sm_data = sm_data[(sm_data >= 0) & (sm_data <= 1)]
    # Mask out-of-range ATI/VI samples before fitting
    ati_in_sm = [np.where(arr < 0, np.nan, arr) for arr in ati_in_sm]
    ati_in_sm = [np.where(arr > 1, np.nan, arr) for arr in ati_in_sm]
    vi_in_sm = [np.where(arr < 0, np.nan, arr) for arr in vi_in_sm]
    vi_in_sm = [np.where(arr > 1, np.nan, arr) for arr in vi_in_sm]

    # Stack all samples into an (n, 2) design matrix of (ati, vi) pairs
    X1 = np.vstack((np.concatenate([arr.flatten() for arr in ati_in_sm]), np.concatenate([arr.flatten() for arr in vi_in_sm]))).T
    popt, pcov = curve_fit(fit_formula, X1, sm_data, maxfev=1000000, check_finite=False)
    print("计算结果：", popt)
    # Apply the fitted model to every high-res pixel
    X2 = np.vstack((ati.flatten(), vi.flatten())).T
    sm_downscale = down_scaling_naqu(X2, *popt)
    sm_downscale = sm_downscale.reshape(vi.shape)
    # Out-of-range results become nodata
    sm_downscale[(sm_downscale < 0) | (sm_downscale > 1)] = -9999
    write_tif(output_path, sm_downscale, vi_geo, vi_projection, -9999, GDT_Float32)


# Iterate over matched input rasters and run the full downscaling chain
def data_iter(lst_day_dir, lst_night_dir, albedo_dir, vi_dir, SM_dir, output_dir, doy=1):
    """Run the ATI / feature-space / AVDI downscaling over matched files.

    Files across the five directories are paired positionally, so every
    directory must contain the same number of .tif files in matching name
    order; the listings are sorted because os.listdir order is arbitrary.

    :param doy: day of year used for the solar correction. BUGFIX: the
        original called get_ati() without the required doy/geo arguments,
        which raised TypeError on every file; pass the scene's actual DOY
        here (default 1 keeps the call signature backward-compatible).
    """
    lst_day_list = sorted(os.path.join(lst_day_dir, x) for x in os.listdir(lst_day_dir) if x.endswith('.tif'))
    lst_night_list = sorted(os.path.join(lst_night_dir, x) for x in os.listdir(lst_night_dir) if x.endswith('.tif'))
    albedo_list = sorted(os.path.join(albedo_dir, x) for x in os.listdir(albedo_dir) if x.endswith('.tif'))
    vi_list = sorted(os.path.join(vi_dir, x) for x in os.listdir(vi_dir) if x.endswith('.tif'))
    SM_list = sorted(os.path.join(SM_dir, x) for x in os.listdir(SM_dir) if x.endswith('.tif'))
    for lst_day_path, lst_night_path, albedo_path, vi_path, sm_path in zip(lst_day_list, lst_night_list, albedo_list, vi_list, SM_list):
        lst_day = gdal.Open(lst_day_path)
        lst_day_data = lst_day.ReadAsArray()
        lst_geo = lst_day.GetGeoTransform()
        lst_proj = lst_day.GetProjection()
        # Resample the companion rasters onto the day-LST grid
        lst_night_data = gdal.Open(lst_night_path).ReadAsArray(buf_ysize=lst_day_data.shape[0], buf_xsize=lst_day_data.shape[1])
        albedo_data = gdal.Open(albedo_path).ReadAsArray(buf_ysize=lst_day_data.shape[0], buf_xsize=lst_day_data.shape[1])
        vi_data = gdal.Open(vi_path).ReadAsArray(buf_ysize=lst_day_data.shape[0], buf_xsize=lst_day_data.shape[1])
        ati = get_ati(lst_day_data, lst_night_data, albedo_data, doy, lst_geo)
        sm = gdal.Open(sm_path)
        sm_data = sm.ReadAsArray()
        sm_geo = sm.GetGeoTransform()
        # Screen the vegetation index for nodata / negative values
        vi_data[vi_data == -9999] = np.nan
        vi_data[vi_data < 0] = np.nan
        # Dry/wet edge coefficients of the VI-ATI feature space
        a, b, c, d = feature_space(vi_data, ati)
        # AVDI and the final downscaled soil moisture
        avdi = get_avdi(ati, vi_data, a, b, c, d)
        output_path = os.path.join(output_dir, os.path.basename(lst_day_path))
        downscal_by_avdi(sm_data, avdi, sm_geo, lst_geo, lst_proj, output_path)


# Sample the downscaled soil-moisture rasters at station points into the CSV
def read_sm_to_excel(sm_dir, down_scal_dir, excel_path):
    """
    For every record in the CSV at *excel_path*, look up the downscaled
    soil-moisture raster for that date, sample it at the record's (lon, lat)
    and write the value back into the same CSV.

    :param sm_dir: directory of coarse SM GeoTIFFs (only used by the
        commented-out branch below)
    :param down_scal_dir: directory of downscaled SM GeoTIFFs named
        "<year><day-of-year>.tif"
    :param excel_path: CSV with at least 'date' (YYYYMMDD), 'lon' and 'lat'
        columns; updated in place
    """
    ctp_df = pd.read_csv(excel_path)
    print(ctp_df)
    # sm_list = [os.path.join(sm_dir, i) for i in os.listdir(sm_dir) if i.endswith(".tif")]
    for index, row in ctp_df.iterrows():
        date = row["date"]
        lon = row["lon"]
        lat = row["lat"]
        # Convert the YYYYMMDD date to a Julian day string (year + day-of-year).
        targetday = datetime.date(int(str(date)[0:4]), int(str(date)[4:6]), int(str(date)[6:8]))
        day = targetday - datetime.date(targetday.year - 1, 12, 31)
        # NOTE(review): day-of-year is NOT zero-padded (e.g. "20155" for
        # 2015-01-05, not "2015005") — confirm the raster file names in
        # down_scal_dir follow this same unpadded scheme.
        julian_day = str(date)[0:4] + str(day.days)

        # sm_path = os.path.join(sm_dir, str(date) + ".tif")
        # sm = gdal.Open(sm_path)
        # sm_data = sm.ReadAsArray()
        # sm_geo = sm.GetGeoTransform()
        # col, row = coord2pixel(sm_geo, lon, lat)
        # sm_value = sm_data[row, col]
        # if sm_value != -9999:
        #     ctp_df.loc[index, 'SM'] = sm_value
        #

        sm_downscal_path = os.path.join(down_scal_dir, str(julian_day) + ".tif")
        sm_downscal = gdal.Open(sm_downscal_path)
        sm_downscal_data = sm_downscal.ReadAsArray()
        sm_downscal_geo = sm_downscal.GetGeoTransform()
        # Convert the station coordinate to pixel indices on the raster grid.
        sm_downscal_col, sm_downscal_row = coord2pixel(sm_downscal_geo, lon, lat)
        sm_downscal_value = sm_downscal_data[sm_downscal_row, sm_downscal_col]
        # Keep only physically valid soil-moisture values in [0, 1].
        if 0 <= sm_downscal_value <= 1:
            # NOTE(review): the sampled value is stored in the 'evi' column —
            # confirm this is the intended destination column.
            ctp_df.loc[index, 'evi'] = sm_downscal_value
    print(ctp_df)
    ctp_df.to_csv(excel_path, index=False)
    print("写入完成")


# Correlation analysis
def corr_analys(excel_path):
    """
    Correlation analysis between in-situ measurements ('mean_value'), the
    coarse product ('SM') and the downscaled result ('SM_downscal') read
    from the CSV at *excel_path*.

    Draws three scatter panels (Measure-CTP, Measure-Downscal,
    CTP-Downscal), each with the 1:1 line, the least-squares fit line and
    the Pearson R annotated.

    :param excel_path: CSV containing 'mean_value', 'SM', 'SM_downscal'
    """
    ctp_df = pd.read_csv(excel_path)
    # Pearson correlation needs numeric columns.
    ctp_df['mean_value'] = ctp_df['mean_value'].astype(float)
    ctp_df['SM'] = ctp_df['SM'].astype(float)
    ctp_df['SM_downscal'] = ctp_df['SM_downscal'].astype(float)
    sm_df = ctp_df[['mean_value', 'SM', 'SM_downscal']]
    # Drop rows containing any NaN so corr/polyfit see clean data.
    sm_df = sm_df.dropna(axis=0, how='any')
    # Pearson correlation matrix.
    corr = sm_df.corr(method='pearson')
    # Degree-1 fit coefficients: [slope, intercept].
    measure_ctp_coeff = np.polyfit(sm_df['mean_value'], sm_df['SM'], 1)
    measure_downscal_coeff = np.polyfit(sm_df['mean_value'], sm_df['SM_downscal'], 1)
    ctp_downscal_coeff = np.polyfit(sm_df['SM'], sm_df['SM_downscal'], 1)
    # Fitted regression lines.  Bug fix: the Measure-CTP line previously
    # used the intercept of the Measure-Downscal fit.
    measure_ctp_fit = measure_ctp_coeff[0] * sm_df['mean_value'] + measure_ctp_coeff[1]
    measure_downscal_fit = measure_downscal_coeff[0] * sm_df['mean_value'] + measure_downscal_coeff[1]
    ctp_downscal_fit = ctp_downscal_coeff[0] * sm_df['SM'] + ctp_downscal_coeff[1]

    # Positional indices of the extreme x-values, used to span each fit
    # line across the full data range.
    measure_argmin = np.argmin(sm_df['mean_value'])
    measure_argmax = np.argmax(sm_df['mean_value'])
    ctp_argmin = np.argmin(sm_df['SM'])
    ctp_argmax = np.argmax(sm_df['SM'])

    # Plot the three panels.
    plt.figure(figsize=(12, 4))
    plt.subplot(131)
    plt.title("Measure-CTP")
    plt.xlabel("Measure")
    plt.ylabel("CTP")
    plt.scatter(sm_df['mean_value'], sm_df['SM'], s=5, c='b')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([sm_df['mean_value'].to_numpy()[measure_argmin], sm_df['mean_value'].to_numpy()[measure_argmax]],
             [measure_ctp_fit.to_numpy()[measure_argmin], measure_ctp_fit.to_numpy()[measure_argmax]], c='r')
    plt.text(0.01, 1.0, 'y = {:.4f} x + {:.4f}'.format(measure_ctp_coeff[0], measure_ctp_coeff[1]))
    plt.text(0.01, 0.95, 'R = {:.4f}'.format(corr['mean_value']['SM']))

    plt.subplot(132)
    plt.title("Measure-Downscal")
    plt.xlabel("Measure")
    plt.ylabel("Downscal")
    plt.scatter(sm_df['mean_value'], sm_df['SM_downscal'], s=5, c='b')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot([sm_df['mean_value'].to_numpy()[measure_argmin], sm_df['mean_value'].to_numpy()[measure_argmax]],
             [measure_downscal_fit.to_numpy()[measure_argmin], measure_downscal_fit.to_numpy()[measure_argmax]], c='r')
    plt.text(0.01, 1.0, 'y = {:.4f} x + {:.4f}'.format(measure_downscal_coeff[0], measure_downscal_coeff[1]))
    plt.text(0.01, 0.95, 'R = {:.4f}'.format(corr['mean_value']['SM_downscal']))

    plt.subplot(133)
    plt.title("CTP-Downscal")
    plt.xlabel("CTP")
    plt.ylabel("Downscal")
    plt.scatter(sm_df['SM'], sm_df['SM_downscal'], s=5, c='b')
    plt.plot([0, 1], [0, 1], 'k--')
    # Bug fix: the x endpoint previously used measure_argmax instead of
    # ctp_argmax, so the fit line could end at the wrong x position.
    plt.plot([sm_df['SM'].to_numpy()[ctp_argmin], sm_df['SM'].to_numpy()[ctp_argmax]],
             [ctp_downscal_fit.to_numpy()[ctp_argmin], ctp_downscal_fit.to_numpy()[ctp_argmax]], c='r')
    plt.text(0.01, 1.0, 'y = {:.4f} x + {:.4f}'.format(ctp_downscal_coeff[0], ctp_downscal_coeff[1]))
    plt.text(0.01, 0.95, 'R = {:.4f}'.format(corr['SM']['SM_downscal']))

    plt.show()


if __name__ == '__main__':
    # albedo_dir = r"G:\test\LastTest\ALBEDO\linear_albedo_mask"
    # lst_day_dir = r"G:\test\LastTest\LST\day_rec_cut"
    # lst_night_dir = r"G:\test\LastTest\LST\night_rec_cut"
    # vi_dir = r"G:\test\LastTest\VI\VI_INDEX\savi_qc"
    # SM_dir = r"G:\test\SMAP_NEW\ESACCI\TIF\2015-2017-CLIP"
    # output_dir = r"G:\test\SMAP_NEW\2015_downscal"
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    # Data notes:
    # Albedo: multiply by scale factor 0.0001, nodata=-9999, raw range 0-10000
    # Land-surface temperature: unit K
    # Vegetation index (NDVI): nodata=-9999, value range -1 to 1
    # data_iter(lst_day_dir, lst_night_dir, albedo_dir, vi_dir, SM_dir, output_dir)

    # ctp_measure = r"G:\test\LastTest\CTP.csv"
    # read_sm_to_excel(SM_dir, output_dir, ctp_measure)
    # corr_analys(ctp_measure)

    # Downscale directly with ATI
    # vi_dir = r"G:\test\SMAP_NEW\AVDI\msavi"
    # sm_dir = r"G:\test\SMAP_NEW\MCCA\TIF\2016-clip"
    # output_dir = r"G:\test\SMAP_NEW\AVDI\result\msavi"
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    # vi_list = [os.path.join(vi_dir, file) for file in os.listdir(vi_dir) if file.endswith('.tif')]
    # sm_list = [os.path.join(sm_dir, file) for file in os.listdir(sm_dir) if file.endswith('.tif')]
    # for vi in vi_list:
    #     sm_path = os.path.join(sm_dir, os.path.basename(vi))
    #     vi_dataset = gdal.Open(vi)
    #     sm_dataset = gdal.Open(sm_path)
    #     vi_data = vi_dataset.ReadAsArray()
    #     sm_data = sm_dataset.ReadAsArray()
    #     vi_geo = vi_dataset.GetGeoTransform()
    #     sm_geo = sm_dataset.GetGeoTransform()
    #     vi_proj = vi_dataset.GetProjection()
    #
    #     vi_data[vi_data > 1] = np.nan
    #     vi_data[vi_data < 0] = np.nan
    #
    #     output_path = os.path.join(output_dir, os.path.basename(vi))
    #     downscal_by_weight(sm_data,  vi_data, sm_geo, vi_geo, vi_proj, output_path)

    # Run the AVDI downscaling for each vegetation-index product.
    for vi_name in ['evi', 'ndvi', 'msavi', 'nirv', 'fpar', 'lai']:
        # if vi_name != 'lai':
        #     continue
        vi_dir = r"G:\test\SMAP_NEW\2015\{}".format(vi_name)
        ati_dir = r"G:\test\SMAP_NEW\2015_ati"
        # ati_dir = r"G:\test\SMAP_NEW\2015_lst"
        sm_dir = r"G:\test\SMAP_NEW\ESACCI\TIF\2014-2017"
        downscal_output_dir = r"G:\test\SMAP_NEW\2015_downscal_avdi\{}".format(vi_name)
        avdi_output_dir = r"G:\test\SMAP_NEW\2015_avdi\{}".format(vi_name)
        if not os.path.exists(downscal_output_dir):
            os.makedirs(downscal_output_dir)
        if not os.path.exists(avdi_output_dir):
            os.makedirs(avdi_output_dir)
        vi_list = [os.path.join(vi_dir, file) for file in os.listdir(vi_dir) if file.endswith('.tif')]
        for vi in vi_list:
            # VI files are "<prefix>_<date>.tif"; ATI/SM files share the date part.
            basename = os.path.basename(vi).split("_")[-1]
            ati_path = os.path.join(ati_dir, basename)
            # ati_path = os.path.join(ati_dir, os.path.basename(vi))
            sm_path = os.path.join(sm_dir, basename)
            # sm_path = os.path.join(sm_dir, os.path.basename(vi))
            vi_dataset = gdal.Open(vi)
            ati_dataset = gdal.Open(ati_path)
            sm_dataset = gdal.Open(sm_path)
            # Apply the product scale factors (checked against the file PATH).
            if 'fpar' in vi:
                vi_data = vi_dataset.ReadAsArray().astype(np.float32) * 0.01
            elif 'lai' in vi:
                vi_data = vi_dataset.ReadAsArray().astype(np.float32) * 0.1
            else:
                vi_data = vi_dataset.ReadAsArray().astype(np.float32)
            # Resample ATI onto the VI grid while reading.
            ati_data = ati_dataset.ReadAsArray(buf_xsize=vi_data.shape[1], buf_ysize=vi_data.shape[0])
            vi_geo = vi_dataset.GetGeoTransform()
            ati_geo = ati_dataset.GetGeoTransform()
            vi_proj = vi_dataset.GetProjection()
            sm_geo = sm_dataset.GetGeoTransform()
            sm_data = sm_dataset.ReadAsArray()
            # fpar/lai are only bounded below; the others are valid in [0, 1].
            if 'fpar' not in vi and 'lai' not in vi:
                vi_data[vi_data > 1] = np.nan
                vi_data[vi_data < 0] = np.nan
            else:
                vi_data[vi_data < 0] = np.nan
            ati_data[ati_data > 1] = np.nan
            ati_data[ati_data < 0.01] = np.nan
            a, b, c, d = feature_space_version4(vi_data, ati_data, vi_name)
            # Compute AVDI and flag out-of-range values as nodata.
            avdi = get_avdi(ati_data, vi_data, a, b, c, d)
            avdi[avdi > 1] = -9999
            avdi[avdi < 0] = -9999
            avdi_output_path = os.path.join(avdi_output_dir, basename)
            # write_tif(avdi_output_path, avdi, vi_geo, vi_proj, -9999, GDT_Float32)
            # Bug fix: the downscaled output path was previously joined with
            # avdi_output_dir, so results landed in the AVDI folder and
            # downscal_output_dir was created but never used.
            downscal_output_path = os.path.join(downscal_output_dir, basename)
            # downscal_by_avdi(sm_data, avdi, sm_geo, vi_geo, vi_proj, output_path)
            downscal_by_weight(sm_data, avdi, sm_geo, vi_geo, vi_proj, downscal_output_path)
            # downscal_by_naqu(sm_data, ati_data, vi_data, sm_geo, vi_geo, vi_proj, output_path)
