import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
from sklearn.utils.class_weight import compute_class_weight
from xgboost import XGBClassifier
import time
from datetime import datetime

# Ensure the output directory exists and start the wall-clock timer.
start_time = time.time()
os.makedirs('result', exist_ok=True)

# Load the port lookup table plus the train/test splits.
port_id_df = pd.read_excel('port_id.xlsx')
train_df = pd.read_excel('partoftrain.xlsx')
test_df = pd.read_excel('remaketest.xlsx')

# Bidirectional port mappings: raw port_code string <-> numeric port_id.
port_id_mapping = port_id_df.set_index('port_code')['port_id'].to_dict()
port_id_reverse = dict(zip(port_id_mapping.values(), port_id_mapping.keys()))

# Encode the target as numeric port ids; rows whose destination port is not
# in the lookup table map to NaN and are discarded.
train_df['end_port_code'] = train_df['end_port_code'].map(port_id_mapping)
train_df = train_df[train_df['end_port_code'].notna()].copy()

# Feature engineering: derived vessel attributes, applied to both splits.
for df in [train_df, test_df]:
    # Year offset kept as a model feature; vessel_age recovers the true age
    # relative to 2024 (reference year hard-coded — presumably the data year).
    df['build_year'] = df['build_year'] - 2000
    df['vessel_age'] = 2024 - (df['build_year'] + 2000)
    # Deadweight per unit deck area. Guard BOTH length and width against
    # zero (the original guarded only width, so a zero length produced inf).
    df['cargo_density'] = df['deadweight'] / (
        df['length'].replace(0, 1e-6) * df['width'].replace(0, 1e-6)
    )
    # Bucket max_speed into 4 ordinal levels; right=False makes bins
    # left-closed ([0,15), [15,20), ...). cat.codes maps NaN speeds to -1.
    df['speed_level'] = pd.cut(df['max_speed'],
                               bins=[0, 15, 20, 25, np.inf],
                               labels=[1, 2, 3, 4],
                               right=False).cat.codes

# Drop destination classes with fewer than `min_samples` training rows so
# every remaining class can be stratified and learned.
min_samples = 3
class_counts = train_df['end_port_code'].value_counts()
valid_classes = class_counts.index[class_counts >= min_samples]
train_df_filtered = train_df[train_df['end_port_code'].isin(valid_classes)].copy()

# Rule table: start ports seen at most 3 times that always lead to the same
# destination get a deterministic prediction, bypassing the model entirely.
# Keys are the raw start_port_code strings.
start_port_stats = train_df_filtered.groupby('start_port_code')['end_port_code'].agg(['count', 'nunique', 'unique'])
deterministic = (start_port_stats['count'] <= 3) & (start_port_stats['nunique'] == 1)
low_freq_dict = {
    start: destinations[0]
    for start, destinations in start_port_stats.loc[deterministic, 'unique'].items()
}

# Encode destination port ids into contiguous class labels for XGBoost.
le = LabelEncoder()
train_df_filtered.loc[:, 'target'] = le.fit_transform(train_df_filtered['end_port_code'])

# Feature set fed to the model. start_port_code stays in its raw string form
# so the low-frequency rule table can key on it at prediction time.
features = [
    'start_port_code', 'I_Month', 'J_Season', 'K_Time Period',
    'start_ctry_code', 'start_lon', 'start_lat', 'start_timezone_offset',
    'vessel_model', 'build_year', 'deadweight', 'length', 'width',
    'height', 'draught', 'max_speed', 'vessel_age', 'cargo_density', 'speed_level'
]

categorical_features = [
    'start_port_code', 'J_Season', 'K_Time Period',
    'start_ctry_code', 'vessel_model', 'speed_level'
]

# Preserve the declaration order of `features`. The previous
# list(set(...) - set(...)) had hash-randomized ordering, which made the
# ColumnTransformer's output column order non-reproducible across runs.
numeric_features = [f for f in features if f not in categorical_features]

# Preprocessing: numerics get median-impute + standardization; string/ordinal
# categoricals get mode-impute + ordinal codes, with categories unseen at fit
# time mapped to -1 at transform time.
numeric_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler()),
])
categorical_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)),
])
preprocessor = ColumnTransformer(transformers=[
    ('num', numeric_pipeline, numeric_features),
    ('cat', categorical_pipeline, categorical_features),
])

# Balanced class weights: n_samples / (n_classes * count_per_class).
# NOTE(review): class_weights is computed here but never passed to the
# classifier below (e.g. via classifier__sample_weight in fit) — confirm
# whether this is a missing hookup or intentional dead code.
classes = np.unique(train_df_filtered['target'])
class_weights = compute_class_weight(
    class_weight='balanced',
    classes=classes,
    y=train_df_filtered['target']
)

# XGBoost multi-class model behind the shared preprocessor. Hyperparameters
# come from a prior tuning run and are used as-is (no re-search here).
_xgb_params = dict(
    objective='multi:softmax',
    eval_metric='mlogloss',
    early_stopping_rounds=30,
    tree_method='hist',
    n_estimators=554,
    max_depth=12,
    learning_rate=0.07327713404303043,
    subsample=0.8579821220208961,
    colsample_bytree=0.5139996989640846,
    gamma=0.39212665565243776,
    reg_alpha=0.29496926574203647,
    reg_lambda=1.360115471897576,
    min_child_weight=3,
    n_jobs=2,
    verbosity=0,
)
optimized_model = Pipeline([
    ('preprocessor', preprocessor),
    ('classifier', XGBClassifier(**_xgb_params)),
])

# Hold out 20% of the filtered training data for validation, stratified on
# the target so every (already >= min_samples) class appears in both splits.
X = train_df_filtered[features]
y = train_df_filtered['target']
X_train, X_val, y_train, y_val = train_test_split(
    X,
    y,
    test_size=0.2,
    stratify=y,
    random_state=42,
)

# Early stopping needs eval_set already in the classifier's input space, but
# Pipeline.fit only transforms X, not eval_set. sklearn Pipelines fit their
# steps in place (no cloning), so fitting `preprocessor` here and again
# inside optimized_model.fit below uses the same object fitted on the same
# X_train — the pre-transformed validation fold stays consistent.
preprocessor.fit(X_train)
X_val_transformed = preprocessor.transform(X_val)

# Train with early stopping (30 rounds, mlogloss) on the validation fold.
optimized_model.fit(
    X_train, y_train,
    classifier__eval_set=[(X_val_transformed, y_val)],  # preprocessed validation data
    classifier__verbose=0
)

# Accuracy on the held-out fold; the full pipeline handles preprocessing.
val_pred = optimized_model.predict(X_val)
val_accuracy = accuracy_score(y_val, val_pred)
print(f"验证准确率: {val_accuracy:.4f}")

# Pull the per-round evaluation history out of the fitted booster.
xgb_model = optimized_model.named_steps['classifier']
results = xgb_model.evals_result()

# Plot validation multi-class log loss per boosting round and save it under
# result/ with a timestamped filename.
loss_history = results['validation_0']['mlogloss']

plt.figure(figsize=(10, 6))
plt.plot(range(len(loss_history)), loss_history, label='Validation')
plt.legend()
plt.ylabel('Multi-class Log Loss')
plt.xlabel('Boosting Rounds')
plt.title('XGBoost Training History')
plt.grid(True)
plt.ylim(0, 4)  # clamp the y-axis so early large losses don't flatten the curve

timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
plt.savefig(f'result/training_history_{timestamp}.png', dpi=300, bbox_inches='tight')
plt.close()

def predict_with_rules(test_data):
    """Predict destination port codes, rules first, model second.

    Rows whose start_port_code appears in the deterministic low-frequency
    rule table get that destination directly; everything else goes through
    the fitted pipeline, and the predicted class labels are decoded back to
    raw port_code strings ('UNKNOWN' when a port_id has no reverse mapping).
    Returns a string Series aligned with test_data's index.
    """
    predictions = test_data['start_port_code'].map(low_freq_dict).astype(object)
    unresolved = test_data[predictions.isna()]

    if not unresolved.empty:
        class_labels = optimized_model.predict(unresolved[features])
        port_ids = le.inverse_transform(class_labels)
        port_codes = [port_id_reverse.get(int(pid), 'UNKNOWN') for pid in port_ids]
        # Model outputs are never NaN, so positional assignment matches the
        # original Series.update semantics exactly.
        predictions.loc[unresolved.index] = port_codes

    return predictions.astype(str)

# Apply the rule+model predictor to the test set and persist the submission.
test_df['end_port_code'] = predict_with_rules(test_df)
test_df[['uuid', 'start_port_code', 'end_port_code']].to_excel('entry2test6.xlsx', index=False)
print("优化预测结果已保存至entry2test6.xlsx")

print(f"脚本运行总时间: {time.time()-start_time:.2f}秒")