import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
import xgboost as xgb
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error

# Read the local LYG2023.csv file
file_path = 'LYG2023.csv'
df = pd.read_csv(file_path)

# Extract the 4-digit year from the file stem (e.g. 'LYG2023' -> '2023').
# Equivalent to the original reverse-slice/list/reverse/join dance, but direct.
year = file_path.split('.')[0][-4:]

# Build a DatetimeIndex from the Date (day-of-year) and Time (hour) columns.
# NOTE(review): the '%Y%j' format assumes Date parses as a day-of-year token
# next to the 4-digit year — confirm Date values are zero-padded as needed.
df['Time'] = pd.to_datetime(year + df['Date'].astype(str) + ' ' + df['Time'].astype(str) + ':00:00',
                            format='%Y%j %H:%M:%S')
df.set_index('Time', inplace=True)

# Step 1: strict data preprocessing
print("数据预处理前:")
print(df.dtypes)  # dump dtypes for debugging

# Coerce every object-typed column to numeric; unparseable cells become NaN.
# NOTE(review): with errors='coerce', to_numeric essentially never raises, so
# the except branch is a defensive fallback rather than the normal path.
# Snapshot the column list so dropping a column inside the loop is safe.
for col in list(df.columns):
    if df[col].dtype == 'object':
        try:
            df[col] = pd.to_numeric(df[col], errors='coerce')
            print(f"列 {col} 已成功转换为数值类型")
        except Exception:  # was a bare except: — never swallow SystemExit/KeyboardInterrupt
            print(f"列 {col} 无法转换为数值类型，将被删除")
            df = df.drop(col, axis=1)

# Drop columns that are entirely NaN
df = df.dropna(axis=1, how='all')

# Fill gaps: forward fill, then backward fill to cover leading NaNs
df = df.ffill().bfill()

# Re-check dtypes after coercion
print("\n数据预处理后:")
print(df.dtypes)

# Keep only numeric columns for the downstream models
non_numeric_cols = df.select_dtypes(exclude=[np.number]).columns
if len(non_numeric_cols) > 0:
    print("\n警告: 仍有非数值列存在:")
    print(non_numeric_cols)
    df = df.select_dtypes(include=[np.number])
    print("已筛选出数值列进行后续分析")


# 步骤 2: 使用 ARIMA 进行时间序列预测
def arima_forecast(data, p=1, d=1, q=1):
    """One-step-ahead ARIMA(p, d, q) forecast for a single series.

    Parameters
    ----------
    data : pd.Series
        Series to forecast; NaNs are filled (ffill then bfill) before fitting.
    p, d, q : int
        ARIMA order components.

    Returns
    -------
    float
        The one-step forecast. If fitting or forecasting fails for any
        reason, falls back to the last valid observation; returns NaN when
        the series has no valid data at all (previously raised IndexError).
    """
    try:
        # Ensure no missing values before fitting
        if data.isna().any():
            data = data.ffill().bfill()

        model = ARIMA(data, order=(p, d, q))
        model_fit = model.fit()
        forecast = model_fit.forecast(steps=1)
        return forecast.iloc[0]  # positional access on the forecast Series
    except Exception as e:
        print(f"对列 {data.name} 进行 ARIMA 预测时出错: {e}")
        # Fall back to the last valid value; NaN for an all-NaN series
        valid = data.dropna()
        return valid.iloc[-1] if not valid.empty else np.nan


# Collect the one-step ARIMA forecast for every column; this single row of
# forecasts is the feature vector fed to the trained regressors below.
arima_features = [arima_forecast(df[column]) for column in df.columns]

# Shape (1, n_features): one "future" sample
arima_features = np.array(arima_features).reshape(1, -1)

# Step 3: feature scaling
# BUG FIX: StandardScaler().fit_transform() on a single (1, n) sample zeroes
# every feature (each column's mean is its own value and its variance is 0,
# so sklearn substitutes a unit scale and the output is all zeros), which
# discarded the ARIMA forecasts entirely. The downstream XGBoost models are
# trained on *unscaled* X_train, so the consistent choice is to pass the
# forecast vector through unscaled.
scaler = StandardScaler()  # kept defined for interface compatibility; intentionally not fitted
arima_features_scaled = arima_features

# Step 4: training features — the full preprocessed history
train_data = df

# train_data IS df, so df.values is the feature matrix directly
# (the original train_data[df.columns] re-indexing was a no-op).
X_train = train_data.values
num_samples = X_train.shape[0]

# PLACEHOLDER labels: uniform random scores in [0, 100) per row. These stand
# in for real per-industry scores, so every trained model's output is noise
# until real labels are wired in (see the TODOs in the commented code below).
y_fishery_train = np.random.rand(num_samples) * 100
y_tourism_train = np.random.rand(num_samples) * 100
y_shipping_train = np.random.rand(num_samples) * 100
y_energy_train = np.random.rand(num_samples) * 100
y_farming_train = np.random.rand(num_samples) * 100
y_biotech_train = np.random.rand(num_samples) * 100


# 训练 XGBoost 模型
def train_xgboost(X, y):
    """Fit a default-configured XGBoost regressor on (X, y) and return it."""
    regressor = xgb.XGBRegressor()
    regressor.fit(X, y)
    return regressor


# Steps 5/6: train one regressor per industry, predict the score for the
# ARIMA-forecast feature vector, and clamp each score to the 0-100 scale.
# The six (label, y) pairs drive a single loop instead of six hand-copied
# train/predict/clip/print pipelines.
_industry_labels = [
    ('渔业', y_fishery_train),
    ('旅游业', y_tourism_train),
    ('航运业', y_shipping_train),
    ('能源业', y_energy_train),
    ('养殖业', y_farming_train),
    ('生物技术业', y_biotech_train),
]

_models = [train_xgboost(X_train, _y) for _, _y in _industry_labels]
_scores = [np.clip(_m.predict(arima_features_scaled)[0], 0, 100) for _m in _models]

# Keep every original per-industry name so any external code still works.
(model_fishery, model_tourism, model_shipping,
 model_energy, model_farming, model_biotech) = _models
(score_fishery, score_tourism, score_shipping,
 score_energy, score_farming, score_biotech) = _scores

# Report — output format is byte-identical to the original six print calls
print(f"\n最终预测得分:")
for (_label, _), _score in zip(_industry_labels, _scores):
    print(f"{_label}推荐得分: {_score:.2f}")


# import pandas as pd
# import numpy as np
# from statsmodels.tsa.arima.model import ARIMA
# import xgboost as xgb
# from sklearn.preprocessing import StandardScaler
# from sklearn.metrics import mean_squared_error
# import os

# # 获取当前脚本所在目录
# script_dir = os.path.dirname(os.path.abspath(__file__))

# # 读取本地的LYG2023.csv文件
# file_path = os.path.join(script_dir, 'LYG2023.csv')
# df = pd.read_csv(file_path)

# # 从文件名中提取年份
# file_name = list(file_path.split('.')[0][-1:-5:-1])
# file_name.reverse()
# year = ('').join(file_name)

# # 合并日期和时间列
# df['Time'] = pd.to_datetime(year + df['Date'].astype(str) + ' ' + df['Time'].astype(str) + ':00:00',
#                             format='%Y%j %H:%M:%S')
# df.set_index('Time', inplace=True)

# # 步骤 1: 严格的数据预处理
# print("数据预处理前:")
# print(df.dtypes)  # 打印数据类型，便于调试

# # 确保所有列都是数值类型
# for col in df.columns:
#     if df[col].dtype == 'object':
#         try:
#             df[col] = pd.to_numeric(df[col], errors='coerce')
#             print(f"列 {col} 已成功转换为数值类型")
#         except:
#             print(f"列 {col} 无法转换为数值类型，将被删除")
#             df = df.drop(col, axis=1)

# # 删除全为NaN的列
# df = df.dropna(axis=1, how='all')

# # 填充缺失值 (使用前向填充和后向填充结合)
# df = df.ffill().bfill()

# # 再次检查数据类型
# print("\n数据预处理后:")
# print(df.dtypes)

# # 验证是否所有列都是数值类型
# non_numeric_cols = df.select_dtypes(exclude=[np.number]).columns
# if len(non_numeric_cols) > 0:
#     print("\n警告: 仍有非数值列存在:")
#     print(non_numeric_cols)
#     df = df.select_dtypes(include=[np.number])
#     print("已筛选出数值列进行后续分析")

# # 步骤 2: 使用 ARIMA 进行时间序列预测
# def arima_forecast(data, p=1, d=1, q=1):
#     try:
#         # 再次确认数据没有缺失值
#         if data.isna().any():
#             data = data.ffill().bfill()

#         model = ARIMA(data, order=(p, d, q))
#         model_fit = model.fit()
#         forecast = model_fit.forecast(steps=1)
#         return forecast.values[0]
#     except Exception as e:
#         print(f"对列 {data.name} 进行 ARIMA 预测时出错: {e}")
#         # 返回该列的最后一个有效值作为替代
#         return data.dropna().iloc[-1]

# # 初始化一个空列表，用于存储每个维度的 ARIMA 预测结果
# arima_features = []
# # 遍历 DataFrame 的每一列
# for column in df.columns:
#     arima_features.append(arima_forecast(df[column]))

# # 将 arima_features 列表转换为 numpy 数组，并将其形状调整为 (1, -1)
# arima_features = np.array(arima_features).reshape(1, -1)

# # 步骤 3: 数据标准化
# scaler = StandardScaler()
# arima_features_scaled = scaler.fit_transform(arima_features)

# # 步骤 4: 读取真实的训练数据和标签
# train_data = df

# TODO:下面的是我修改后的代码，随机生成得分已经修改为读取真实得分，
# TODO:得分数据由createScores.py生成，根据维度赋予权重后计算每一行数据对应各产业的得分
# TODO:但是读取文件核心有点问题还要修改，读取文件提示找不到文件，需要看下咋回事(createScores.py是读取services目录下的文件然后生成csv到项目根目录)

# # 读取已计算好的产业得分数据
# try:
#     # 读取包含产业得分的CSV文件
#     scores_file = os.path.join(script_dir, 'LYG2001_weighted.csv')
#     scores_df = pd.read_csv(scores_file)
    
#     # 提取各产业得分列
#     y_fishery_train = scores_df['fishery_scores'].values
#     y_tourism_train = scores_df['tourism_scores'].values
#     y_shipping_train = scores_df['shipping_scores'].values
#     y_energy_train = scores_df['energy_scores'].values
#     y_farming_train = scores_df['farming_scores'].values
#     y_biotech_train = scores_df['biotech_scores'].values
    
#     # 验证标签数据的有效性
#     for label_name, labels in [
#         ('渔业', y_fishery_train), 
#         ('旅游业', y_tourism_train),
#         ('航运业', y_shipping_train), 
#         ('能源业', y_energy_train),
#         ('农业', y_farming_train), 
#         ('生物技术业', y_biotech_train)
#     ]:
#         if len(labels) != len(train_data):
#             raise ValueError(f"{label_name}得分数据行数({len(labels)})与训练数据行数({len(train_data)})不一致")
#         if np.isnan(labels).any():
#             print(f"警告: {label_name}得分数据包含NaN值，将使用中位数填充")
#             median_val = np.nanmedian(labels)
#             labels = np.nan_to_num(labels, nan=median_val)
            
#     print("成功读取产业得分数据")
    
# except Exception as e:
#     print(f"读取产业得分数据时出错: {e}")
#     print("将使用随机生成的标签进行训练（仅用于测试）")
    
#     # 确保训练数据和标签行数一致
#     num_samples = train_data.shape[0]
    
#     # 生成与训练数据行数一致的随机标签
#     y_fishery_train = np.random.rand(num_samples) * 100
#     y_tourism_train = np.random.rand(num_samples) * 100
#     y_shipping_train = np.random.rand(num_samples) * 100
#     y_energy_train = np.random.rand(num_samples) * 100
#     y_farming_train = np.random.rand(num_samples) * 100
#     y_biotech_train = np.random.rand(num_samples) * 100

# # 训练 XGBoost 模型
# def train_xgboost(X, y):
#     model = xgb.XGBRegressor()
#     model.fit(X, y)
#     return model

# # 训练各个行业得分的模型
# model_fishery = train_xgboost(train_data, y_fishery_train)
# model_tourism = train_xgboost(train_data, y_tourism_train)
# model_shipping = train_xgboost(train_data, y_shipping_train)
# model_energy = train_xgboost(train_data, y_energy_train)
# model_farming = train_xgboost(train_data, y_farming_train)
# model_biotech = train_xgboost(train_data, y_biotech_train)

# # 步骤 5: 预测得分
# score_fishery = model_fishery.predict(arima_features_scaled)[0]
# score_tourism = model_tourism.predict(arima_features_scaled)[0]
# score_shipping = model_shipping.predict(arima_features_scaled)[0]
# score_energy = model_energy.predict(arima_features_scaled)[0]
# score_farming = model_farming.predict(arima_features_scaled)[0]
# score_biotech = model_biotech.predict(arima_features_scaled)[0]

# # 步骤 6: 转换为百分制得分
# score_fishery = np.clip(score_fishery, 0, 100)
# score_tourism = np.clip(score_tourism, 0, 100)
# score_shipping = np.clip(score_shipping, 0, 100)
# score_energy = np.clip(score_energy, 0, 100)
# score_farming = np.clip(score_farming, 0, 100)
# score_biotech = np.clip(score_biotech, 0, 100)

# # 输出结果
# print(f"\n最终预测得分:")
# print(f"渔业推荐得分: {score_fishery:.2f}")
# print(f"旅游业推荐得分: {score_tourism:.2f}")
# print(f"航运业推荐得分: {score_shipping:.2f}")
# print(f"能源业推荐得分: {score_energy:.2f}")
# print(f"农业推荐得分: {score_farming:.2f}")
# print(f"生物技术业推荐得分: {score_biotech:.2f}")