import py7zr
import pandas as pd
import os
import tempfile
import logging
from datetime import datetime
import warnings
from sqlalchemy import create_engine, text, exc
import re
import time
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import math
# NOTE(review): globally suppresses *all* warnings (pandas/SQLAlchemy chatter
# during bulk loads) — also hides genuinely useful deprecation warnings.
warnings.filterwarnings('ignore')

# Configure logging: mirror every message to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('stock_data_processing.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger shared by the processor class and main().
logger = logging.getLogger(__name__)

class StockMinuteDataProcessor:
    """Extract per-stock minute-bar CSVs from monthly 7z archives and load them
    into per-stock MySQL tables (``base_daily_datetime_<ts_code>``).

    Archives are laid out as ``<base_path>/<year>/<month>/*.7z`` and are
    processed concurrently by a thread pool; per-table locks serialize DDL and
    inserts that target the same table.
    """

    def __init__(self, base_path, db_config, max_workers=8):
        """
        Initialize the stock minute-bar data processor.

        :param base_path: root directory holding ``<year>/<month>/*.7z``
            archives (e.g. ``D:\\BaiduNetdiskDownload``)
        :param db_config: dict with ``host``/``port``/``user``/``password``/
            ``database`` keys for the MySQL connection
        :param max_workers: maximum number of worker threads
        """
        self.base_path = base_path
        self.db_config = db_config
        self.max_workers = max_workers
        self.engine = self._create_engine()
        # Progress counters. Only mutated by the main thread inside
        # process_year_range (workers return counts via futures), so no lock.
        self.processed_files = 0
        self.processed_records = 0
        self.table_locks = {}  # one Lock per table name, so same-table work serializes
        self.table_lock = threading.Lock()  # guards table_locks itself
        self.created_tables = set()  # tables already created/verified; skips repeat DDL

    def _create_engine(self):
        """Build the SQLAlchemy engine for the configured MySQL database.

        :raises Exception: re-raised after logging when engine creation fails.
        """
        try:
            connection_string = (
                f"mysql+pymysql://{self.db_config['user']}:{self.db_config['password']}@"
                f"{self.db_config['host']}:{self.db_config['port']}/{self.db_config['database']}"
                f"?charset=utf8mb4"
            )
            # pool_pre_ping revalidates stale connections; the pool is sized
            # generously because every worker thread inserts concurrently.
            engine = create_engine(connection_string, echo=False, pool_pre_ping=True, pool_size=20, max_overflow=30)
            logger.info("数据库连接创建成功")
            return engine
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise

    def _get_table_lock(self, table_name):
        """Return the lock dedicated to ``table_name``, creating it on first use."""
        with self.table_lock:
            return self.table_locks.setdefault(table_name, threading.Lock())

    def create_stock_table(self, ts_code):
        """
        Create the per-stock data table if it does not exist yet.

        Uses SQLAlchemy 2.0 style (``text()`` + explicit ``commit()``).
        Thread-safe: DDL for a given table runs under that table's lock, and
        verified tables are cached in ``self.created_tables`` so subsequent
        calls return immediately.

        :param ts_code: six-digit stock code embedded in the table name
        :return: True when the table exists (or was created), False on failure
        """
        table_name = f"base_daily_datetime_{ts_code}"

        # Fast path: table already created/verified during this run.
        if table_name in self.created_tables:
            return True

        with self._get_table_lock(table_name):
            try:
                with self.engine.connect() as conn:
                    check_sql = text("SHOW TABLES LIKE :table_name")
                    result = conn.execute(check_sql, {'table_name': table_name})

                    if result.fetchone() is None:
                        # Table is missing — create it.
                        logger.info(f"开始创建表: {table_name}")

                        create_sql = text(f"""
                            CREATE TABLE `{table_name}` (
                              `id` int(11) NOT NULL AUTO_INCREMENT,
                              `ts_code` varchar(255) DEFAULT NULL,
                              `trade_date` int(11) DEFAULT NULL,
                              `trade_date_txt` varchar(255) DEFAULT NULL,
                              `open` decimal(10,2) DEFAULT NULL,
                              `high` decimal(10,2) DEFAULT NULL,
                              `low` decimal(10,2) DEFAULT NULL,
                              `close` decimal(10,2) DEFAULT NULL,
                              `volume` int(11) DEFAULT NULL,
                              `amount` decimal(20,2) DEFAULT NULL,
                              `pre_close` decimal(10,2) DEFAULT NULL,
                              `pre_avg` decimal(10,3) DEFAULT NULL,
                              PRIMARY KEY (`id`),
                              KEY `idx_trade_date` (`trade_date`),
                              KEY `idx_ts_code` (`ts_code`)
                            ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
                        """)

                        conn.execute(create_sql)
                        conn.commit()

                        # Re-check so a silent DDL failure is caught here
                        # rather than as insert errors later.
                        result = conn.execute(check_sql, {'table_name': table_name})
                        if result.fetchone() is not None:
                            self.created_tables.add(table_name)
                            logger.info(f"成功创建表: {table_name}")
                        else:
                            logger.error(f"表创建后验证失败: {table_name}")
                            return False
                    else:
                        self.created_tables.add(table_name)
                        logger.debug(f"表已存在: {table_name}")

                return True

            except exc.SQLAlchemyError as e:
                logger.error(f"创建表 {table_name} 失败 (SQL错误): {e}")
                # A concurrent creator may have won the race; treat
                # "already exists" as success.
                if "already exists" in str(e).lower():
                    self.created_tables.add(table_name)
                    logger.info(f"表已存在 (忽略错误): {table_name}")
                    return True
                return False
            except Exception as e:
                logger.error(f"创建表 {table_name} 失败: {e}")
                return False

    def process_year_range(self, start_year=2022, end_year=2025):
        """
        Process all archives in ``[start_year, end_year]``, newest first,
        fanning 7z files out to the thread pool and aggregating counts.

        :param start_year: first (oldest) year to include
        :param end_year: last (newest) year to include
        """
        logger.info(f"开始处理 {start_year}-{end_year} 年的分钟线数据，使用 {self.max_workers} 个线程，从后往前处理")

        try:
            # Gather every 7z archive path, newest-first.
            archive_files = self._collect_archive_files_reverse(start_year, end_year)
            logger.info(f"总共找到 {len(archive_files)} 个压缩文件")

            if not archive_files:
                logger.warning("没有找到任何压缩文件")
                return

            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Submit one task per archive and remember which file each
                # future belongs to for progress reporting.
                future_to_file = {
                    executor.submit(self._process_7z_file_thread, file_path): file_path
                    for file_path in archive_files
                }

                completed_count = 0
                total_files = len(archive_files)

                # Drain futures as they finish; counters are only touched here
                # (main thread), so no synchronization is needed.
                for future in as_completed(future_to_file):
                    file_path = future_to_file[future]
                    completed_count += 1
                    try:
                        result = future.result()
                        if result:
                            self.processed_files += 1
                            self.processed_records += result
                            logger.info(f"[进度 {completed_count}/{total_files}] 文件处理完成: {os.path.basename(file_path)}, 新增记录: {result}")
                        else:
                            logger.info(f"[进度 {completed_count}/{total_files}] 文件处理完成但无记录: {os.path.basename(file_path)}")
                    except Exception as e:
                        logger.error(f"[进度 {completed_count}/{total_files}] 处理文件 {file_path} 时出错: {e}")

            logger.info(f"处理完成! 总共处理了 {self.processed_files} 个文件，{self.processed_records} 条记录")

        except Exception as e:
            logger.error(f"处理过程中出现错误: {e}")
            import traceback
            logger.error(f"详细错误信息:\n{traceback.format_exc()}")

    def _collect_archive_files_reverse(self, start_year, end_year):
        """
        Collect every ``*.7z`` path under ``<base_path>/<year>/<month>``,
        ordered newest-first (years, months and file names all reversed).

        :return: list of absolute archive paths
        """
        archive_files = []

        # Walk years from newest to oldest.
        for year in range(end_year, start_year - 1, -1):
            year_folder = os.path.join(self.base_path, str(year))

            if not os.path.exists(year_folder):
                logger.warning(f"年份文件夹不存在: {year_folder}")
                continue

            # Walk months December -> January.
            for month in range(12, 0, -1):
                month_folder = os.path.join(year_folder, f"{month:02d}")

                if os.path.exists(month_folder):
                    try:
                        files = [f for f in os.listdir(month_folder) if f.endswith('.7z')]
                        # File names reversed too, to keep the global ordering.
                        files.sort(reverse=True)
                        for file_name in files:
                            file_path = os.path.join(month_folder, file_name)
                            archive_files.append(file_path)
                            logger.debug(f"收集到文件: {file_path}")
                    except Exception as e:
                        logger.error(f"访问月份文件夹 {month_folder} 时出错: {e}")

        logger.info(f"文件收集完成，总共 {len(archive_files)} 个文件，按从后往前顺序处理")
        return archive_files

    def _process_7z_file_thread(self, archive_path):
        """
        Worker entry point: extract one 7z archive into a temp directory and
        process the contained CSVs. Safe to run concurrently — each call gets
        its own temp directory and per-table locks guard the inserts.

        :param archive_path: path to the ``.7z`` archive
        :return: number of rows inserted from this archive
        """
        thread_name = threading.current_thread().name
        file_name = os.path.basename(archive_path)
        logger.info(f"[{thread_name}] 开始处理文件: {file_name}")

        total_records = 0

        # TemporaryDirectory guarantees cleanup even when extraction fails.
        with tempfile.TemporaryDirectory() as temp_dir:
            try:
                logger.info(f"[{thread_name}] 开始解压文件: {archive_path}")
                with py7zr.SevenZipFile(archive_path, 'r') as archive:
                    archive.extractall(path=temp_dir)

                # Some archives wrap everything in a single top-level folder;
                # descend into it so the CSV scan sees the files directly.
                temp_contents = os.listdir(temp_dir)
                if len(temp_contents) == 1 and os.path.isdir(os.path.join(temp_dir, temp_contents[0])):
                    actual_extract_dir = os.path.join(temp_dir, temp_contents[0])
                else:
                    actual_extract_dir = temp_dir

                total_records = self._process_extracted_files_thread(actual_extract_dir, file_name, thread_name)

            except Exception as e:
                logger.error(f"[{thread_name}] 处理文件 {file_name} 时出错: {e}")

        logger.info(f"[{thread_name}] 文件处理完成: {file_name}, 记录数: {total_records}")
        return total_records

    def _process_extracted_files_thread(self, extract_dir, archive_name, thread_name):
        """
        Process every CSV file in an extracted archive directory.

        :param extract_dir: directory containing the extracted CSVs
        :param archive_name: archive base name (for log context only)
        :param thread_name: worker thread name (for log context only)
        :return: total rows inserted across all CSVs in this directory
        """
        total_records = 0
        csv_files = [f for f in os.listdir(extract_dir) if f.endswith('.csv')]

        logger.info(f"[{thread_name}] 找到 {len(csv_files)} 个CSV文件")

        for csv_file in csv_files:
            csv_path = os.path.join(extract_dir, csv_file)

            # The file name minus ".csv" is the stock code; csv_files is
            # already filtered to *.csv so the suffix is guaranteed present.
            ts_code = csv_file[:-4].strip()

            # Only plain six-digit codes are accepted.
            if not re.match(r'^\d{6}$', ts_code):
                logger.warning(f"[{thread_name}] ts_code格式不正确: {ts_code}，文件: {csv_file}")
                continue

            try:
                df = self._read_csv_file(csv_path)
                if df is None or df.empty:
                    continue

                records_inserted = self._process_and_insert_data(df, ts_code, thread_name)
                total_records += records_inserted

                if records_inserted > 0:
                    logger.info(f"[{thread_name}] 股票 {ts_code} 插入 {records_inserted} 条记录")

            except Exception as e:
                # One bad CSV must not abort the rest of the archive.
                logger.error(f"[{thread_name}] 处理CSV文件 {csv_file} 时出错: {e}")
                continue

        return total_records

    def _read_csv_file(self, csv_path):
        """
        Read a CSV file, trying several encodings in order of likelihood.

        latin1 comes last as a catch-all: it can decode any byte sequence,
        so it effectively guarantees *some* result for decodable files.

        :return: DataFrame, or None when every attempt fails
        """
        encodings = ['gbk', 'utf-8', 'gb2312', 'latin1']

        for encoding in encodings:
            try:
                df = pd.read_csv(csv_path, encoding=encoding)
                logger.debug(f"成功使用 {encoding} 编码读取文件: {os.path.basename(csv_path)}")
                return df
            except Exception as e:
                logger.debug(f"使用 {encoding} 编码读取失败: {e}")
                continue

        logger.error(f"无法使用任何编码读取文件: {csv_path}")
        return None

    def _process_and_insert_data(self, df, ts_code, thread_name):
        """
        Normalize one stock's DataFrame and insert it into its table.

        :return: number of rows inserted (0 on any failure)
        """
        try:
            # Make sure the destination table exists first.
            if not self.create_stock_table(ts_code):
                logger.error(f"[{thread_name}] 无法为股票 {ts_code} 创建表，跳过处理")
                return 0

            # Map whatever header variant this CSV uses onto standard names.
            column_mapping = self._map_columns(df.columns)

            if not column_mapping:
                logger.warning(f"[{thread_name}] 无法识别CSV文件的列结构: {df.columns.tolist()}")
                return 0

            df_rename = df.rename(columns=column_mapping)

            processed_data = self._process_dataframe(df_rename, ts_code)
            if processed_data.empty:
                return 0

            return self._insert_to_stock_table(processed_data, ts_code, thread_name)

        except Exception as e:
            logger.error(f"[{thread_name}] 处理股票 {ts_code} 数据时出错: {e}")
            return 0

    def _map_columns(self, columns):
        """
        Map raw CSV column names onto the standard column names.

        Matching is exact first, then case-insensitive; the first raw column
        that matches a standard name wins.

        :param columns: iterable of raw column names
        :return: dict of {raw_name: standard_name} suitable for df.rename
        """
        column_mapping = {}
        expected_columns = {
            'date': ['date', 'Date', '日期', 'trade_date', 'datetime'],
            'time': ['time', 'Time', '时间', 'trade_time', 'minute'],
            'open': ['open', 'Open', '开盘价', '开盘', '开盘价格'],
            'high': ['high', 'High', '最高价', '最高', '最高价格'],
            'low': ['low', 'Low', '最低价', '最低', '最低价格'],
            'close': ['close', 'Close', '收盘价', '收盘', '收盘价格'],
            'vol': ['volume', 'vol', 'Volume', '成交量', '量', '成交股数'],
            'amount': ['amount', 'money', 'Amount', '成交额', '金额', '成交金额'],
            'pre_close': ['pre_close', '昨收', '前收盘'],
            'pre_avg': ['pre_avg', '昨均价']
        }

        for standard_col, possible_names in expected_columns.items():
            # Hoisted out of the inner loop: loop-invariant per standard_col.
            lowered_names = [name.lower() for name in possible_names]
            for col in columns:
                col_clean = str(col).strip()
                if col_clean in possible_names or col_clean.lower() in lowered_names:
                    column_mapping[col] = standard_col
                    break

        return column_mapping

    def _process_dataframe(self, df, ts_code):
        """
        Build the normalized DataFrame that matches the table schema.

        Rows whose date/time cannot be parsed into a valid timestamp are
        dropped; numeric columns are coerced with ``errors='coerce'``.

        :param df: DataFrame with columns already renamed to standard names
        :param ts_code: stock code written into every row
        :return: cleaned DataFrame (possibly empty)
        """
        df_process = pd.DataFrame()

        # Derive both the integer timestamp and the human-readable string
        # from the (date, time) pair of each row.
        timestamps = []
        time_strings = []

        for _, row in df.iterrows():
            timestamps.append(self._datetime_to_timestamp(row.get('date'), row.get('time')))
            time_strings.append(self._datetime_to_string(row.get('date'), row.get('time')))

        df_process['trade_date'] = timestamps
        df_process['trade_date_txt'] = time_strings
        df_process['ts_code'] = ts_code

        # Drop rows with unparseable timestamps. Later column assignments
        # from `df` align on the surviving index automatically.
        df_process = df_process.dropna(subset=['trade_date'])

        if df_process.empty:
            return df_process

        # Copy numeric columns (source name -> table column name).
        numeric_columns = {
            'open': 'open',
            'high': 'high',
            'low': 'low',
            'close': 'close',
            'vol': 'volume',
            'amount': 'amount',
            'pre_close': 'pre_close',
            'pre_avg': 'pre_avg'
        }

        for source_col, target_col in numeric_columns.items():
            if source_col in df.columns:
                df_process[target_col] = pd.to_numeric(df[source_col], errors='coerce')
            else:
                df_process[target_col] = None

        # Guarantee the mandatory columns exist even if absent from the CSV.
        required_columns = ['open', 'high', 'low', 'close', 'volume', 'amount']
        for col in required_columns:
            if col not in df_process.columns:
                if col in ['volume', 'amount']:
                    df_process[col] = 0
                else:
                    df_process[col] = None

        # Nullable integer type so NaN timestamps survive the conversion.
        df_process['trade_date'] = pd.to_numeric(df_process['trade_date'], errors='coerce').astype('Int64')

        numeric_cols = ['open', 'high', 'low', 'close', 'volume', 'amount', 'pre_close', 'pre_avg']
        for col in numeric_cols:
            if col in df_process.columns:
                df_process[col] = pd.to_numeric(df_process[col], errors='coerce')

        return df_process.dropna(subset=['trade_date'])

    def _insert_to_stock_table(self, df, ts_code, thread_name):
        """
        Insert the cleaned DataFrame into the stock's table in chunks,
        under the table's lock. Falls back to row-by-row insertion when a
        chunk fails so one bad row cannot sink the whole batch.

        :return: number of rows successfully inserted
        """
        if df.empty:
            return 0

        table_name = f"base_daily_datetime_{ts_code}"
        success_count = 0

        try:
            # Serialize same-table inserts across worker threads.
            with self._get_table_lock(table_name):
                chunk_size = 500  # moderate batch size; balances speed vs. failure blast radius
                total_chunks = math.ceil(len(df) / chunk_size)

                for i in range(0, len(df), chunk_size):
                    chunk = df.iloc[i:i+chunk_size]

                    chunk_clean = chunk.copy()
                    chunk_clean['trade_date'] = pd.to_numeric(chunk_clean['trade_date'], errors='coerce').astype('Int64')

                    # Keep only plausible timestamps (2000-01-01 .. 2030-01-01).
                    valid_min_timestamp = 946684800   # 2000-01-01
                    valid_max_timestamp = 1893456000  # 2030-01-01
                    chunk_clean = chunk_clean[
                        (chunk_clean['trade_date'] >= valid_min_timestamp) &
                        (chunk_clean['trade_date'] <= valid_max_timestamp)
                    ]

                    if not chunk_clean.empty:
                        try:
                            chunk_clean.to_sql(
                                name=table_name,
                                con=self.engine,
                                if_exists='append',
                                index=False,
                                method='multi',   # multi-row VALUES statements
                                chunksize=250
                            )
                            success_count += len(chunk_clean)
                            logger.debug(f"[{thread_name}] 插入 {len(chunk_clean)} 条记录到 {table_name} (批次 {(i//chunk_size)+1}/{total_chunks})")

                        except Exception as chunk_error:
                            logger.error(f"[{thread_name}] 插入批次失败: {chunk_error}")
                            # Salvage what we can, one row at a time.
                            success_count += self._insert_rows_individually(chunk_clean, table_name, thread_name)

            if success_count > 0:
                logger.info(f"[{thread_name}] 股票 {ts_code} 成功插入 {success_count} 条记录")
            return success_count

        except Exception as e:
            logger.error(f"[{thread_name}] 插入表 {table_name} 时出错: {e}")
            return success_count

    def _insert_rows_individually(self, chunk, table_name, thread_name):
        """
        Insert rows one at a time; used as the fallback after a bulk insert
        fails, so valid rows still land while bad ones are only logged.

        :return: number of rows successfully inserted
        """
        success_count = 0

        for _, row in chunk.iterrows():
            try:
                pd.DataFrame([row]).to_sql(
                    name=table_name,
                    con=self.engine,
                    if_exists='append',
                    index=False
                )
                success_count += 1
            except Exception as e:
                logger.warning(f"[{thread_name}] 插入单行失败: {e}")

        return success_count

    def _datetime_to_timestamp(self, date_val, time_val):
        """Convert a (date, time) pair to an int Unix timestamp, or None.

        Delegates parsing to :meth:`_datetime_to_string` so both the
        ``trade_date`` and ``trade_date_txt`` columns stay consistent, then
        validates the result with ``strptime`` (e.g. month 13 -> None).

        NOTE: ``datetime.timestamp()`` interprets the naive datetime in the
        local timezone of the machine running the import.
        """
        try:
            time_string = self._datetime_to_string(date_val, time_val)
            if time_string is None:
                return None
            try:
                datetime_obj = datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S')
                return int(datetime_obj.timestamp())
            except ValueError:
                # Syntactically assembled but not a real calendar date/time.
                return None
        except Exception:
            return None

    def _datetime_to_string(self, date_val, time_val):
        """Format a (date, time) pair as ``'YYYY-MM-DD HH:MM:00'``, or None.

        Accepted date forms: ``YYYY-MM-DD``, ``YYYY/MM/DD``, ``YYYYMMDD``
        and ``YYMMDD`` (two-digit years assumed to be 20xx). ``time_val``
        is treated as an HHMM integer; missing/unparseable values fall back
        to 00:00. The output is *not* calendar-validated — that happens in
        :meth:`_datetime_to_timestamp`.
        """
        try:
            if pd.isna(date_val) or date_val == '' or date_val is None:
                return None

            date_str = str(date_val).strip()

            # Normalize the time portion to a zero-padded HHMM string.
            # NOTE(review): values longer than 4 digits (e.g. HHMMSS) would be
            # misread as HHMM — assumed the feed supplies HHMM; confirm.
            if pd.isna(time_val) or time_val == '' or time_val is None:
                time_str = '0000'
            else:
                try:
                    time_int = int(float(time_val))
                    time_str = str(time_int).zfill(4)
                except (TypeError, ValueError, OverflowError):
                    time_str = '0000'

            # Split the date into year/month/day parts.
            if '-' in date_str:
                parts = date_str.split('-')
            elif '/' in date_str:
                parts = date_str.split('/')
            else:
                if len(date_str) == 8 and date_str.isdigit():
                    parts = [date_str[:4], date_str[4:6], date_str[6:]]
                elif len(date_str) == 6 and date_str.isdigit():
                    parts = [f"20{date_str[:2]}", date_str[2:4], date_str[4:]]
                else:
                    return None

            if len(parts) >= 3:
                year = str(parts[0]).strip()
                month = str(parts[1]).strip()
                day = str(parts[2]).strip()

                if len(year) == 2:
                    year = f"20{year}"
                elif len(year) != 4:
                    # Fallback for malformed years; tied to the 2022+ dataset.
                    year = "2022"

                month = month.zfill(2)
                day = day.zfill(2)

                if len(time_str) >= 4:
                    hour = time_str[:2].zfill(2)
                    minute = time_str[2:4].zfill(2)
                else:
                    hour = "00"
                    minute = "00"

                return f"{year}-{month}-{day} {hour}:{minute}:00"
            else:
                return None

        except Exception:
            # Deliberate best-effort: any unexpected input yields None and
            # the row is dropped downstream.
            return None


def main():
    """Entry point: configure the DB connection and run the 2022-2025 backfill.

    Connection settings can be overridden via the ``STOCK_DB_*`` environment
    variables; the previous hard-coded values remain the defaults, so existing
    behavior is unchanged when nothing is set.
    """
    # SECURITY NOTE: the default credentials below were hard-coded in source.
    # Prefer supplying STOCK_DB_PASSWORD (etc.) via the environment so real
    # secrets need not live in version control.
    db_config = {
        'host': os.environ.get('STOCK_DB_HOST', '172.30.168.43'),
        'user': os.environ.get('STOCK_DB_USER', 'root'),
        'password': os.environ.get('STOCK_DB_PASSWORD', 'DAzA8nuLfDCQfD2n'),
        'database': os.environ.get('STOCK_DB_NAME', 'stock_db'),
        'port': int(os.environ.get('STOCK_DB_PORT', '3306')),
    }

    # Root directory of the <year>/<month>/*.7z archive tree.
    base_path = os.environ.get('STOCK_DATA_PATH', r'D:\BaiduNetdiskDownload')

    # 8 workers by default; tune to the host's CPU count / DB capacity.
    processor = StockMinuteDataProcessor(base_path, db_config, max_workers=8)

    # Process 2022-2025, newest archives first.
    try:
        start_time = time.time()
        processor.process_year_range(start_year=2022, end_year=2025)
        end_time = time.time()

        logger.info(f"所有数据处理完成! 总耗时: {end_time - start_time:.2f} 秒")
        logger.info(f"处理文件数: {processor.processed_files}")
        logger.info(f"处理记录数: {processor.processed_records}")

    except Exception as e:
        logger.error(f"处理失败: {e}")

if __name__ == "__main__":
    main()
