# Imports (direct pymysql import removed as unnecessary; the driver is still
# used indirectly through the SQLAlchemy "mysql+pymysql" URL).
import os
from urllib.parse import quote_plus

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sqlalchemy import create_engine

# Database connection settings (consumed by create_engine below).
# NOTE(review): the password used to be hard-coded in source; it is now read
# from the DB_PASSWORD environment variable, falling back to the original
# value so existing local setups keep working unchanged.
db_config = {
    'host': 'localhost',
    'user': 'root',
    'password': os.environ.get('DB_PASSWORD', '123456'),
    'database': 'tushare1',
    'port': 3306,
    'charset': 'utf8mb4'
}

# Build the SQLAlchemy engine (one shared connection pool for the script).
# quote_plus() percent-encodes special characters in the password (e.g. '@',
# ':', '/') — interpolating it raw would silently corrupt the connection URL.
engine = create_engine(
    f"mysql+pymysql://{db_config['user']}:{quote_plus(db_config['password'])}"
    f"@{db_config['host']}:{db_config['port']}/{db_config['database']}"
    f"?charset={db_config['charset']}"
)

# Stream the result set in chunks so a large table does not have to fit in
# memory all at once while being read.
chunk_size = 10000

# Daily quotes for Hua Xia Bank (600015.SH) over 2023, left-joined with the
# Shanghai Composite index (000001.SH) close and volume on the same trade
# date.  SQL kept verbatim from the original script.
sql = """
    SELECT d.*, i.closes as i_closes, i.vol as i_vol
    FROM date_1 d
    LEFT JOIN index_daily i 
        ON d.trade_date = i.trade_date 
        AND i.ts_code='000001.SH'
    WHERE d.trade_date BETWEEN '2023-01-01' AND '2023-12-31' 
        AND d.ts_code='600015.SH'
    """

try:
    # With chunksize set, read_sql_query returns an iterator of DataFrames
    # rather than a single frame.
    chunks = pd.read_sql_query(sql, engine, chunksize=chunk_size)

    # Materialize before concatenating: pd.concat raises ValueError on an
    # empty iterable, so fall back to an empty DataFrame when the query
    # matches no rows.
    frames = list(chunks)
    df1 = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    # Print every row (only sensible while the result set is small).
    pd.set_option('display.max_rows', None)
    print(df1)
finally:
    # Always release the connection pool, even if the query above fails.
    engine.dispose()