import pandas as pd
from sqlalchemy import create_engine, exc, text
from sqlalchemy.types import VARCHAR, DATETIME, FLOAT, INTEGER
import time
from tqdm import tqdm
import os


def create_mysql_table(engine, table_name='stock_5min_data'):
    """Create the MySQL target table if it does not already exist.

    Args:
        engine: SQLAlchemy engine bound to the target database.
        table_name: Name of the table to create.

    Returns:
        True when the DDL ran without error, False otherwise.
    """
    # One row per (symbol, datetime); the unique key rejects duplicates so
    # repeated imports of the same file cannot double-insert bars.
    ddl = text(f"""
    CREATE TABLE IF NOT EXISTS {table_name} (
        id INT AUTO_INCREMENT PRIMARY KEY,
        symbol VARCHAR(10) NOT NULL,
        datetime DATETIME NOT NULL,
        date DATE,
        time TIME,
        day_of_week TINYINT,
        open FLOAT,
        close FLOAT,
        high FLOAT,
        low FLOAT,
        volume INTEGER,
        amount FLOAT,
        amplitude FLOAT,
        change_percent FLOAT,
        change_amount FLOAT,
        turnover_rate FLOAT,
        adjust_type VARCHAR(3),
        update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
        UNIQUE KEY uk_symbol_datetime (symbol, datetime)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
    """)

    try:
        # begin() runs the statement inside a transaction that is committed
        # on clean exit from the context manager.
        with engine.begin() as conn:
            conn.execute(ddl)
    except exc.SQLAlchemyError as e:
        print(f"创建表失败: {str(e)}")
        return False
    print(f"表 {table_name} 创建成功或已存在")
    return True


def csv_to_mysql(csv_file, mysql_config, table_name='stock_5min_data'):
    """Bulk-load a 5-minute stock-bar CSV file into a MySQL table.

    The file is read in chunks and appended via ``DataFrame.to_sql``.  When a
    chunk hits a uniqueness violation (duplicate symbol/datetime), the chunk is
    retried row by row so only the duplicate rows are skipped rather than
    aborting the whole load.

    Args:
        csv_file: Path to the CSV file to import.
        mysql_config: Dict with keys 'user', 'password', 'host', 'port',
            'database' describing the MySQL connection.
        table_name: Target table; created on demand if it is missing.

    Returns:
        True on overall success, False on any fatal error.
    """
    # Check that the input file exists before touching the database.
    if not os.path.exists(csv_file):
        print(f"文件 {csv_file} 不存在")
        return False

    db_url = f"mysql+pymysql://{mysql_config['user']}:{mysql_config['password']}@{mysql_config['host']}:{mysql_config['port']}/{mysql_config['database']}?charset=utf8mb4"

    engine = None
    try:
        engine = create_engine(db_url)

        # Create the table if it does not exist yet.
        if not create_mysql_table(engine, table_name):
            return False

        print(f"正在读取CSV文件: {csv_file}")

        # Peek at a few rows just to learn which columns are present.
        sample_df = pd.read_csv(csv_file, nrows=5)

        # SQL type mapping for to_sql; trimmed below to the actual columns.
        dtype_mapping = {
            'datetime': DATETIME,
            'symbol': VARCHAR(10),
            'open': FLOAT,
            'close': FLOAT,
            'high': FLOAT,
            'low': FLOAT,
            'volume': INTEGER,
            'amount': FLOAT,
            'amplitude': FLOAT,
            'change_percent': FLOAT,
            'change_amount': FLOAT,
            'turnover_rate': FLOAT,
            'adjust': VARCHAR(3),
            'date': VARCHAR(10),
            'time': VARCHAR(10),
            'day_of_week': INTEGER
        }
        dtype_mapping = {k: v for k, v in dtype_mapping.items() if k in sample_df.columns}

        # Count data rows for the progress bar; 'with' guarantees the handle
        # is closed (the original generator-expression open() leaked it).
        with open(csv_file, 'r', encoding='utf-8') as f:
            total_rows = sum(1 for _ in f) - 1  # minus the header line

        chunksize = 10000
        processed_rows = 0

        with tqdm(total=total_rows, desc="导入进度") as pbar:
            for chunk in pd.read_csv(csv_file, chunksize=chunksize, encoding='utf-8'):
                # Normalize date/time columns to proper Python date/time
                # objects so MySQL's DATE/TIME columns accept them.
                if 'date' in chunk.columns:
                    chunk['date'] = pd.to_datetime(chunk['date']).dt.date
                if 'time' in chunk.columns:
                    chunk['time'] = pd.to_datetime(chunk['time'], format='%H:%M:%S').dt.time

                try:
                    chunk.to_sql(
                        name=table_name,
                        con=engine,
                        if_exists='append',
                        index=False,
                        dtype=dtype_mapping,
                        method='multi'
                    )
                    processed_rows += len(chunk)
                    pbar.update(len(chunk))
                except exc.IntegrityError:
                    # The whole chunk was rolled back on the duplicate; retry
                    # row by row so only the duplicates are dropped.
                    for _, row in chunk.iterrows():
                        try:
                            row_df = pd.DataFrame([row])
                            row_df.to_sql(
                                name=table_name,
                                con=engine,
                                if_exists='append',
                                index=False,
                                dtype=dtype_mapping
                            )
                            processed_rows += 1
                            pbar.update(1)
                        except exc.IntegrityError:
                            # Duplicate (symbol, datetime): skip it, but still
                            # advance the bar so progress stays accurate.
                            pbar.update(1)
                            continue
                        except Exception as e:
                            print(f"插入单行时出错: {str(e)}")
                            continue
                except Exception as e:
                    print(f"插入数据块时出错: {str(e)}")
                    continue

        print(f"\n成功导入 {processed_rows}/{total_rows} 条数据到表 {table_name}")
        return True

    except Exception as e:
        print(f"导入过程出错: {str(e)}")
        return False
    finally:
        # Release pooled connections; the original leaked the engine.
        if engine is not None:
            engine.dispose()


# MySQL connection settings - replace with your actual configuration.
mysql_config = {
    'host': 'localhost',  # database server hostname or IP
    'port': 3306,  # MySQL server port (3306 is the default)
    'user': 'your_username',  # account used for CREATE TABLE / INSERT
    'password': 'your_password',
    'database': 'stock_data'  # target database (schema) name
}

# Usage example: verify connectivity, then import a single CSV file.
if __name__ == "__main__":
    # Smoke-test the database connection before starting a long import.
    try:
        test_engine = create_engine(
            f"mysql+pymysql://{mysql_config['user']}:{mysql_config['password']}@{mysql_config['host']}:{mysql_config['port']}/{mysql_config['database']}?charset=utf8mb4")
        try:
            with test_engine.connect() as conn:
                conn.execute(text("SELECT 1"))
        finally:
            # Close the probe engine's pooled connections (was leaked before).
            test_engine.dispose()
        print("数据库连接测试成功")
    except Exception as e:
        print(f"数据库连接失败: {str(e)}")
        # raise SystemExit instead of exit(): exit() is a site-module
        # convenience and is not guaranteed to exist in every runtime.
        raise SystemExit(1)

    # Import a single file.
    csv_file = "600000_5min_hfq.csv"  # replace with your CSV file path
    if csv_to_mysql(csv_file, mysql_config):
        print("导入成功")
    else:
        print("导入失败")