import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.feature_selection import SelectKBest, f_regression
import warnings
warnings.filterwarnings('ignore')

# Configure fonts so CJK labels render correctly in matplotlib figures.
plt.rcParams.update({
    'font.sans-serif': ['SimHei', 'Arial Unicode MS', 'DejaVu Sans'],
    'axes.unicode_minus': False,  # keep minus signs rendering with CJK fonts
})

print("===== 上市公司营收预测建模分析（简化版）=====")

# Load the listed-company revenue dataset from the local Excel workbook.
df = pd.read_excel("上市公司营收数据.xlsx")
n_rows, n_cols = df.shape
print(f"数据加载完成，共{n_rows}行，{n_cols}列")

# ---- 1. Preprocessing and feature engineering ----
print("\n===== 1. 数据预处理和特征工程 =====")

# Derive standard financial ratios from the raw statement columns.
revenue = df['营业收入']
df['毛利率'] = (revenue - df['营业成本']) / revenue                     # gross margin
df['资产负债率'] = df['负债合计'] / df['资产总计']                       # debt-to-asset ratio
df['权益乘数'] = df['资产总计'] / df['所有者权益合计']                   # equity multiplier
df['经营现金流比率'] = df['经营活动产生的现金流量净额'] / revenue        # operating cash flow ratio

# Zero denominators yield +/-inf; map them to NaN, then drop any
# incomplete rows so downstream models see only finite values.
df = df.replace([np.inf, -np.inf], np.nan).dropna()
print(f"特征工程完成，处理后数据量：{df.shape[0]}行")

# Correlation of every numeric feature with the prediction target.
print("\n特征相关性矩阵（与营业收入的相关性）：")
numeric_features = df.drop(['证券代码', '证券简称'], axis=1)
corr_with_revenue = (
    numeric_features.corr()[['营业收入']]
    .sort_values('营业收入', ascending=False)
)
print(corr_with_revenue.head(10))

# Heatmap over the first 10 features only, to keep rendering fast.
plt.figure(figsize=(10, 8))
corr = numeric_features.iloc[:, :10].corr()
# Mask the redundant upper triangle (the matrix is symmetric).
upper_mask = np.triu(np.ones(corr.shape, dtype=bool))
sns.heatmap(corr, mask=upper_mask, annot=False, cmap='coolwarm')
plt.title('特征相关性热图')
plt.tight_layout()
plt.savefig('correlation_heatmap.png', dpi=300)
plt.close()

# ---- 2. Feature selection ----
print("\n===== 2. 特征选择 =====")

# Predictors: every numeric column except the identifiers and the target.
X = df.drop(['证券代码', '证券简称', '营业收入'], axis=1)
y = df['营业收入']

# BUGFIX (data leakage): the original fit StandardScaler and SelectKBest
# on the FULL dataset and only split afterwards, so test-set statistics
# leaked into the preprocessing and inflated the reported test metrics.
# Split first, then fit all preprocessing on the training split only.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

# Standardize using mean/std estimated from the training split only.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_raw)
X_test_scaled = scaler.transform(X_test_raw)

# Univariate F-test selection of the 6 strongest features, fit on train.
selector = SelectKBest(f_regression, k=6)
X_train = selector.fit_transform(X_train_scaled, y_train)
X_test = selector.transform(X_test_scaled)
selected_features = X.columns[selector.get_support()]
print(f"选择的特征：{selected_features.tolist()}")

# Visualize the F-scores of the first 10 candidate features.
feature_scores = pd.DataFrame({
    'Feature': X.columns[:10],  # display only the first 10 features
    'Score': selector.scores_[:10]
}).sort_values('Score', ascending=False)

plt.figure(figsize=(10, 6))
sns.barplot(x='Score', y='Feature', data=feature_scores)
plt.title('特征重要性排序')
plt.tight_layout()
plt.savefig('feature_importance.png', dpi=300)
plt.close()

# ---- 3. Model building and evaluation ----
print("\n===== 3. 模型构建和评估 =====")
print(f"训练集大小：{X_train.shape[0]}，测试集大小：{X_test.shape[0]}")

# Fitted estimators and their metric dicts, keyed by short model name.
models = {}
results = {}

# Helper: fit one estimator and report its regression metrics.
def evaluate_model(model_name, model, X_train, y_train, X_test, y_test):
    """Fit ``model`` on the training split and score it on both splits.

    Prints the R2 scores and the test RMSE under ``model_name``, then
    returns ``(fitted model, metrics dict)`` where the dict contains
    train/test R2, train/test RMSE, and test MAE.
    """
    model.fit(X_train, y_train)
    pred_train = model.predict(X_train)
    pred_test = model.predict(X_test)

    # RMSE is derived from MSE so both splits use the same code path.
    mse_train = mean_squared_error(y_train, pred_train)
    mse_test = mean_squared_error(y_test, pred_test)
    metrics = {
        'Train_R2': r2_score(y_train, pred_train),
        'Test_R2': r2_score(y_test, pred_test),
        'Train_RMSE': np.sqrt(mse_train),
        'Test_RMSE': np.sqrt(mse_test),
        'Test_MAE': mean_absolute_error(y_test, pred_test),
    }

    print(f"\n{model_name} 模型评估：")
    print(f"训练集 R²: {metrics['Train_R2']:.4f}")
    print(f"测试集 R²: {metrics['Test_R2']:.4f}")
    print(f"测试集 RMSE: {metrics['Test_RMSE']:.2f}")

    return model, metrics

# ---- 4-6. Train and evaluate each candidate regressor ----
# Each entry: (section header, registry key, display name, estimator).
candidates = [
    ('线性回归模型', 'Linear', '线性回归', LinearRegression()),
    ('Ridge回归模型', 'Ridge', 'Ridge回归', Ridge(alpha=1.0)),
    # Random forest with deliberately small settings to keep runtime low.
    ('随机森林回归模型', 'RandomForest', '随机森林',
     RandomForestRegressor(n_estimators=50, max_depth=10, random_state=42)),
]
for header, key, display_name, estimator in candidates:
    print(f"\n----- {header} -----\n")
    models[key], results[key] = evaluate_model(
        display_name, estimator, X_train, y_train, X_test, y_test)

# ---- 7. Compare model performance ----
print("\n===== 4. 模型比较 =====")
model_comparison = pd.DataFrame(results).T
print("\n各模型性能比较:")
print(model_comparison[["Train_R2", "Test_R2", "Test_RMSE"]]
      .sort_values("Test_R2", ascending=False))

# Side-by-side bar charts: R² (train vs test) and test RMSE.
fig, (ax_r2, ax_rmse) = plt.subplots(1, 2, figsize=(12, 5))

model_comparison[['Train_R2', 'Test_R2']].plot(kind='bar', ax=ax_r2)
ax_r2.set_title('各模型R²分数')
ax_r2.set_ylabel('R²')
ax_r2.grid(axis='y', linestyle='--', alpha=0.7)

model_comparison[['Test_RMSE']].plot(kind='bar', ax=ax_rmse)
ax_rmse.set_title('各模型RMSE')
ax_rmse.set_ylabel('RMSE')
ax_rmse.grid(axis='y', linestyle='--', alpha=0.7)

fig.tight_layout()
fig.savefig('model_comparison.png', dpi=300)
plt.close(fig)

# ---- 8. Identify the best model and its key driving factors ----
print("\n===== 5. 最佳模型分析 =====")

# Best model = highest R² on the held-out test set.
best_model_name = max(results, key=lambda name: results[name]['Test_R2'])
best_model = models[best_model_name]
print(f"最佳模型: {best_model_name}")
print(f"测试集R²: {results[best_model_name]['Test_R2']:.4f}")

if best_model_name == 'RandomForest':
    # Tree ensembles expose impurity-based feature importances.
    feature_importance = pd.DataFrame({
        'Feature': selected_features,
        'Importance': best_model.feature_importances_
    }).sort_values('Importance', ascending=False)

    print("\n关键影响因素排序:")
    for i, (idx, row) in enumerate(feature_importance.iterrows(), 1):
        print(f"{i}. {row['Feature']}: {row['Importance']:.4f}")

    # Save the importance chart for the winning model.
    plt.figure(figsize=(10, 6))
    sns.barplot(x='Importance', y='Feature', data=feature_importance)
    plt.title(f'{best_model_name}模型特征重要性')
    plt.tight_layout()
    plt.savefig(f'{best_model_name.lower()}_feature_importance.png', dpi=300)
    plt.close()
elif hasattr(best_model, 'coef_'):
    # Linear models: rank features by |coefficient| (inputs are
    # standardized, so magnitudes are comparable across features).
    coefs = best_model.coef_
    feature_importance = pd.DataFrame({
        'Feature': selected_features,
        'Coefficient': coefs,
        'Abs_Coefficient': np.abs(coefs)
    }).sort_values('Abs_Coefficient', ascending=False)

    print("\n关键影响因素排序（基于系数绝对值）:")
    for i, (idx, row) in enumerate(feature_importance.iterrows(), 1):
        print(f"{i}. {row['Feature']}: {row['Coefficient']:.4f}")

    # BUGFIX: the original saved an importance chart only for the random
    # forest, yet the closing summary promises charts for the key
    # factors. Save the equivalent coefficient chart for linear models.
    plt.figure(figsize=(10, 6))
    sns.barplot(x='Abs_Coefficient', y='Feature', data=feature_importance)
    plt.title(f'{best_model_name}模型特征重要性')
    plt.tight_layout()
    plt.savefig(f'{best_model_name.lower()}_feature_importance.png', dpi=300)
    plt.close()

# ---- 9. Predicted vs. actual scatter (sampled to keep plotting fast) ----
# BUGFIX: seed the sampler so the saved figure is reproducible between
# runs; the original drew from the unseeded global np.random state.
rng = np.random.default_rng(42)
sample_size = min(500, len(y_test))
sample_indices = rng.choice(len(y_test), sample_size, replace=False)
y_test_sample = y_test.iloc[sample_indices]  # positional selection into the Series
y_pred_best_sample = best_model.predict(X_test[sample_indices])

plt.figure(figsize=(10, 6))
plt.scatter(y_test_sample, y_pred_best_sample, alpha=0.5)
# Reference diagonal: perfect predictions fall on this line.
plt.plot([y_test_sample.min(), y_test_sample.max()],
         [y_test_sample.min(), y_test_sample.max()], 'r--')
plt.xlabel('实际营业收入')
plt.ylabel('预测营业收入')
plt.title(f'{best_model_name}模型预测结果 vs 实际值')
plt.grid(True, linestyle='--', alpha=0.7)
plt.tight_layout()
plt.savefig(f'{best_model_name.lower()}_prediction_scatter.png', dpi=300)
plt.close()

print("\n===== 分析完成 =====")
print("所有图表已保存到当前目录")
print(f"最佳模型是 {best_model_name}，测试集R²为 {results[best_model_name]['Test_R2']:.4f}")
print("关键影响因素已在控制台输出和图表中展示")