# 1 2 3 5 的整合，处理单个tif图像，后面整理一个批量处理的代码。

# 运行run.py文件
# 输入为：tif文件。
# 识别文件名，举例：Landset8_20240925，因Landset5、Landset7、Landset8、Landset9用到的波段名称不同，用到的参数不同，需要进行识别判断。
# 输出为：
# （1）csv文件Landset8_20240925.csv，包含从tif文件中提取出来的经纬度和波段值。
# （2）csv文件Landset8_20240925_raw.csv，包含经纬度坐标信息、计算的原始生态因子。
# （3）csv文件Landset8_20240925_rsei.csv，包含经纬度坐标信息、归一化后的六种生态因子数值、RSEI数值。
# （4）png图片Landset8_20240925_hot.png，热力图


import rasterio
import csv
from pyproj import Transformer
import os
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# Path of the single TIF file to process.
tif_file = 'Landset8_20130406.tif'  # replace with your file name

# The file name encodes "<satellite>_<date>.tif"; pull both parts out.
name_parts = tif_file.split('_')
satellite_name = name_parts[0]
date = name_parts[1].split('.')[0]
print(f"提取的时间为: {date}")
print(f"卫星名为: {satellite_name}")

# Band name sets per sensor generation.
LC57_BANDS = ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7', 'ST_B6']  # Landsat 5/7
LC89_BANDS = ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7', 'ST_B10']  # Landsat 8/9

# Dispatch table instead of an if/elif chain.
_SATELLITE_BANDS = {
    'Landset5': LC57_BANDS,
    'Landset7': LC57_BANDS,
    'Landset8': LC89_BANDS,
    'Landset9': LC89_BANDS,
}
band_names = _SATELLITE_BANDS.get(satellite_name)
if band_names is None:
    band_names = []  # unknown satellite: fall back to an empty list
    print("未知的卫星名，请检查文件名。")

# Echo the chosen bands for confirmation.
print(f"选定的波段为: {band_names}")

# Full path of the per-pixel CSV export.
csv_file = os.path.basename(tif_file).replace('.tif', '.csv')

# 打开 TIF 文件
# Export every pixel of the TIF as one CSV row: longitude, latitude, band values.
with rasterio.open(tif_file) as src:
    # Number of bands in the image.
    band_count = src.count
    print(f"{os.path.basename(tif_file)} 波段数量: {band_count}")

    # Read all bands in one call: ndarray of shape (band_count, rows, cols).
    # (Avoids the repeated src.read(1) calls the original used for shape/dtype.)
    bands_data = src.read()
    rows, cols = src.height, src.width

    # Build a transformer from the raster's own CRS to WGS84 lon/lat.
    # FIX: the original hard-coded "epsg:32649"; using src.crs generalizes to
    # any projected input, and a geographic raster needs no transform at all.
    if src.crs is not None and not src.crs.is_geographic:
        transformer = Transformer.from_crs(src.crs, "epsg:4326", always_xy=True)
    else:
        transformer = None  # coordinates are already lon/lat

    # Write the CSV.
    with open(csv_file, mode='w', newline='') as file:
        writer = csv.writer(file)

        # Header: coordinates plus one column per band.
        header = ['longitude', 'latitude']
        header.extend(band_names[:band_count])  # only as many names as there are bands
        writer.writerow(header)

        # Walk every pixel.
        for row in range(rows):
            for col in range(cols):
                # Projected coordinates (x, y) of this pixel's center.
                x, y = src.xy(row, col)

                # Convert to lon/lat when the raster is projected.
                if transformer:
                    lon, lat = transformer.transform(x, y)
                else:
                    lon, lat = x, y

                # All band values at this pixel.
                values = bands_data[:, row, col]

                writer.writerow([lon, lat] + values.tolist())

    print(f"{os.path.basename(tif_file)} 数据已导出到 {csv_file}")

# Load the per-pixel table exported above (longitude, latitude, raw band DNs).
df = pd.read_csv(csv_file)

def get_wet_coefficients(satellite_name):
    """Return Tasseled-Cap wetness coefficients keyed by band color name.

    Landsat 5 (TM) and Landsat 7 (ETM+) have sensor-specific coefficient
    sets; Landsat 8/9 (OLI) share the default set in the else branch.
    """
    # BUG FIX: the original tested for "Landset6", which never occurs in the
    # file-naming scheme (Landset5/7/8/9), so Landsat 5 silently received the
    # OLI coefficients. Also corrected the TM Red coefficient 0.3012 -> 0.3102
    # (Crist 1985 TM wetness transform).
    if satellite_name == "Landset5":
        return {"Blue": 0.0315, "Green": 0.2021, "Red": 0.3102, "NIR": 0.1594, "SWIR1": -0.6806, "SWIR2": -0.6109}
    elif satellite_name == "Landset7":
        return {"Blue": 0.2626, "Green": 0.2141, "Red": 0.0926, "NIR": 0.0656, "SWIR1": -0.7629, "SWIR2": -0.5388}
    else:
        return {"Blue": 0.1511, "Green": 0.1973, "Red": 0.3283, "NIR": 0.3407, "SWIR1": -0.7117, "SWIR2": -0.4559}

# Convert Collection-2 Level-2 DNs to physical units:
# surface reflectance = DN * 0.0000275 - 0.2; surface temperature (K) = DN * 0.00341802 + 149.0
for col in df.columns:
    if col in ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7']:
        df[col] = df[col] * 0.0000275 - 0.2
    elif col == 'ST_B10' or col == 'ST_B6':
        df[col] = df[col] * 0.00341802 + 149.0

# Map sensor band IDs to color names.
# BUG FIX: the original always applied the Landsat 8/9 (OLI) mapping. On
# Landsat 5/7 (TM/ETM+) the numbering is shifted (SR_B1=Blue ... SR_B5=SWIR1,
# SR_B7=SWIR2, thermal=ST_B6), so colors were mislabeled and no 'TIR' column
# was ever produced, which would break the LST computation below.
if satellite_name in ('Landset5', 'Landset7'):
    new_column_names = {
        'SR_B1': 'Blue',
        'SR_B2': 'Green',
        'SR_B3': 'Red',
        'SR_B4': 'NIR',
        'SR_B5': 'SWIR1',
        'SR_B7': 'SWIR2',
        'ST_B6': 'TIR',
    }
else:
    new_column_names = {
        'SR_B2': 'Blue',
        'SR_B3': 'Green',
        'SR_B4': 'Red',
        'SR_B5': 'NIR',
        'SR_B6': 'SWIR1',
        'SR_B7': 'SWIR2',
        'ST_B10': 'TIR',
    }

# Rename the columns in place.
df.rename(columns=new_column_names, inplace=True)

# Sensor-appropriate Tasseled-Cap wetness coefficients.
wet_coefficients = get_wet_coefficients(satellite_name)

# --- Ecological indices (greenness, wetness, dryness, heat, salinity) ---

# Tasseled-Cap wetness: weighted sum over the six reflective bands.
df['WET'] = sum(df[band] * wet_coefficients[band]
                for band in ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2'])

# Bare-soil index SI.
si_numer = df['SWIR1'] + df['Red'] - df['NIR'] - df['Blue']
si_denom = df['SWIR1'] + df['Red'] + df['NIR'] + df['Blue']
df['SI'] = si_numer / si_denom

# Index-based built-up index IBI: built-up term vs vegetation/water term.
built_term = (2.0 * df['SWIR1']) / (df['SWIR1'] + df['NIR'])
green_term = (df['NIR'] / (df['NIR'] + df['Red']) +
              df['Green'] / (df['Green'] + df['SWIR1']))
df['IBI'] = (built_term - green_term) / (built_term + green_term)

# Dryness is the mean of IBI and SI.
df['NDBSI'] = (df['IBI'] + df['SI']) / 2

# Greenness and land-surface temperature (Kelvin -> Celsius; TIR renamed above).
df['NDVI'] = (df['NIR'] - df['Red']) / (df['NIR'] + df['Red'])
df['LST'] = df['TIR'] - 273.15

# Broadband surface albedo (Liang-style weighted sum).
df['Albedo'] = 0.356 * df['Blue'] + 0.130 * df['Red'] + 0.373 * df['NIR'] + 0.072 * df['SWIR2'] - 0.1108

# Slope of the NDVI-Albedo regression, used by the desertification index DDI.
ndvi_2d = df['NDVI'].values.reshape(-1, 1)
albedo_1d = df['Albedo'].values
regression = LinearRegression().fit(ndvi_2d, albedo_1d)
slope = regression.coef_[0]
df['DDI'] = -1 * (1 / slope) * df['NDVI'] - df['Albedo']
df['DI'] = -1 * df['DDI']

# Simple moisture-deficit proxy.
df['PMDI'] = df['Red'] - df['NIR']

# Salinity indices combined into CSI.
df['SI3'] = np.sqrt(df['Green'] * df['Green'] + df['Red'] * df['Red'])
df['NDSI'] = (df['Red'] - df['NIR']) / (df['Red'] + df['NIR'])
df['SI_T'] = (df['Red'] / df['NIR']) * 100
df['CSI'] = (df['SI_T'] + df['NDSI'] + df['SI3']) / 3


# Persist coordinates plus the raw (un-normalized) ecological factors.
output_raw_columns = ['longitude', 'latitude', 'NDVI', 'WET', 'NDBSI', 'LST', 'PMDI', 'DI', 'CSI']
output_raw_file_path = os.path.basename(tif_file).replace('.tif', '_raw.csv')
df.loc[:, output_raw_columns].to_csv(output_raw_file_path, index=False)

# Indicators entering the PCA (DI is saved in the raw file but excluded here).
columns_to_normalize = ['NDVI', 'WET', 'NDBSI', 'LST', 'PMDI', 'CSI']

# Min-Max normalize each indicator to [0, 1] so PCA weights are comparable.
for column in columns_to_normalize:
    col_min = df[column].min()
    col_max = df[column].max()
    df[column] = (df[column] - col_min) / (col_max - col_min)

# First principal component summarizes the indicators; RSEI = 1 - PC1 so that
# larger values mean better ecological quality (assumes PC1 loads positively
# on degradation — verify against the printed loadings below).
pca = PCA(n_components=1)
rsei_0 = pca.fit_transform(df[columns_to_normalize])

# BUG FIX: fit_transform returns a 2-D array of shape (n_samples, 1);
# flatten it before assignment — pandas rejects a 2-D array as a column.
df['RSEI'] = 1 - rsei_0.ravel()

# Rescale RSEI itself to [0, 1].
df['RSEI'] = (df['RSEI'] - df['RSEI'].min()) / (df['RSEI'].max() - df['RSEI'].min())

# Columns to persist: coordinates plus the normalized indicators and RSEI.
columns_to_save = ['longitude', 'latitude', 'NDVI', 'WET', 'NDBSI', 'LST', 'PMDI', 'CSI', 'RSEI']

# Write the RSEI table next to the input file.
output_rsei_file_path = os.path.basename(tif_file).replace('.tif', '_rsei.csv')
df.loc[:, columns_to_save].to_csv(output_rsei_file_path, index=False)

print(f"已将包含 RSEI 的数据保存到文件: {output_rsei_file_path}")

# How each indicator relates to RSEI: first-PC loadings, sign-flipped to
# account for the RSEI = 1 - PC1 inversion above.
first_pc_loadings = pca.components_[0]
loading_df = pd.DataFrame(first_pc_loadings, index=columns_to_normalize, columns=['Loading'])
loading_df['Loading'] = -loading_df['Loading']

# Positive loading => indicator moves with RSEI; negative => against it.
loading_df['Relationship'] = loading_df['Loading'].apply(
    lambda value: 'Positive' if value > 0 else 'Negative')

# Contribution size is the absolute loading.
loading_df['Magnitude'] = loading_df['Loading'].abs()

print("RSEI 和各个指标的影响关系：")
print(loading_df)

# Output path for the heat-map image.
pic_path = os.path.basename(tif_file).replace('.tif', '_hot.png')

# Re-read the RSEI table just written.
dataset = pd.read_csv(output_rsei_file_path)

# Work on an explicit copy to avoid SettingWithCopy warnings.
data = dataset[['NDBSI', 'NDVI', 'WET', 'LST', 'RSEI']].copy()
# Classify RSEI into five grades (1 = worst, 5 = best).
data.loc[:, 'grade'] = pd.cut(x=data['RSEI'], bins=[0, 0.2, 0.4, 0.6, 0.8, 1.0], labels=[1, 2, 3, 4, 5])

# Append the coordinate columns from the full dataset.
data = pd.concat([data, dataset[['longitude', 'latitude']]], axis=1)

# Red-to-green color map with RSEI normalized to [0, 1].
cmap = plt.get_cmap('RdYlGn')
norm = plt.Normalize(vmin=0, vmax=1)

# Scatter each pixel at its lon/lat, colored by RSEI.
plt.figure(figsize=(10, 8))
plt.scatter(data['longitude'], data['latitude'], c=data['RSEI'], cmap=cmap, norm=norm, s=10)
plt.colorbar(label='RSEI')  # color bar legend
plt.title('RSEI Heatmap')
# BUG FIX: the axis labels were swapped — x plots longitude, y plots latitude.
plt.xlabel('Longitude')
plt.ylabel('Latitude')

# Save, then display.
plt.savefig(pic_path, dpi=1200)
plt.show()