import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import pandas as pd
import boto3
import yaml
import pymysql
import re

# Module-level toggle read/written by CSVFileHandler.on_modified: it processes
# one modify event, then skips the next one (presumably to debounce duplicate
# watchdog notifications for a single file save — TODO confirm on this OS).
limit_upload_counter = 0


class CSVFileHandler(FileSystemEventHandler):
    """Watchdog handler for AOI test-result CSV files.

    On each ``.CSV`` modification it parses the newest CSV in the watched
    directory, uploads any JPEG images that appeared since startup to MinIO,
    and inserts a master record plus detail rows into MySQL.
    """

    def __init__(self, config):
        """Cache configuration values and open the initial DB connection.

        :param config: parsed YAML dict with ``database``, ``directories``,
            ``minio``, ``sql``, ``sql_statements`` and ``data_columns`` keys.
        """
        self.config = config
        self.sql_statements = config['sql_statements']
        self.data_columns = config['data_columns']
        self.db_connection = None
        self.connect_to_db()
        self.CSV_DIR = config['directories']['csv_dir']
        self.JPG_DIR = config['directories']['jpeg_dir']
        self.MINIO_ENDPOINT = config['minio']['endpoint']
        self.MINIO_ACCESS_KEY = config['minio']['access_key']
        self.MINIO_SECRET_KEY = config['minio']['secret_key']
        self.BUCKET_NAME = config['minio']['bucket']
        self.select_last_order_id_sql = config['sql']['select_last_order_id']
        self.select_last_primary_id = config['sql']['select_last_primary_id']
        # NOTE(review): processed_files and delay_timer are assigned but never
        # used elsewhere in this class — kept for backward compatibility.
        self.processed_files = set()
        self.delay_timer = None
        # Snapshot of the JPEG directory at startup; anything not in this set
        # later is considered a "new" image to upload.
        self.initial_jpg_files = set(os.listdir(self.JPG_DIR))
        self.new_images = None
        self.minio_client = None  # created lazily on first upload

    def connect_to_db(self):
        """Open a new pymysql connection using the 'database' config section."""
        self.db_connection = pymysql.connect(
            host=self.config['database']['host'],
            port=self.config['database']['port'],
            user=self.config['database']['user'],
            password=self.config['database']['password'],
            database=self.config['database']['db']
        )

    def reconnect_to_db(self):
        """Retry the DB connection every 5 seconds until it succeeds."""
        while True:
            try:
                self.connect_to_db()
                print("数据库重新连接成功")
                break
            except pymysql.MySQLError as e:
                print(f"数据库连接失败，正在重试... 错误: {e}")
                time.sleep(5)  # wait 5 seconds before retrying

    def check_db_connection(self):
        """Ping the connection (auto-reconnect); fall back to a retry loop."""
        try:
            self.db_connection.ping(reconnect=True)
        except pymysql.MySQLError:
            self.reconnect_to_db()

    def count_placeholders(self, sql_query):
        """Return the number of ``%s`` placeholders in *sql_query*."""
        return sql_query.count('%s')

    def extract_fields(self, sql_query):
        """Return the column names listed in an ``INSERT INTO tbl(...)`` query.

        Returns an empty list when the statement does not match. ``\\s*``
        also accepts a space between the table name and the parenthesis.
        """
        match = re.search(r'INSERT INTO \w+\s*\((.*?)\)', sql_query, re.IGNORECASE | re.DOTALL)
        if match:
            fields = match.group(1).split(',')
            return [field.strip() for field in fields]
        return []

    def get_latest_csv_file(self):
        """Return the most recently modified ``.CSV`` path, or None if absent."""
        files = [os.path.join(self.CSV_DIR, f) for f in os.listdir(self.CSV_DIR) if f.endswith('.CSV')]
        if not files:
            return None
        latest_file = max(files, key=os.path.getmtime)
        return latest_file

    def parse_csv(self, file_path):
        """Parse *file_path* into (part1, part2).

        part1 is a Series taken from row 1 (the master record), part2 a
        DataFrame of detail rows starting at the configured ``start_row``.
        Returns ``(part1, None)`` when there are no detail rows.

        :raises ValueError: when the configured column counts disagree with
            the placeholders/fields of the configured INSERT statements.
        """
        # Validate part1: columns + 1 (the appended order id) must equal the
        # placeholder count, which must equal the field count in the SQL.
        part1_columns = self.data_columns['part1']['columns']
        part1_placeholder_count = self.count_placeholders(self.sql_statements['insert_pcb_test_records'])
        part1_fields = self.extract_fields(self.sql_statements['insert_pcb_test_records'])
        print(
            f"part1 columns: {len(part1_columns)}, placeholders: {part1_placeholder_count}, fields: {len(part1_fields)}")
        # Validate part2: columns + 3 (url, order id, primary id are appended
        # in insert_into_db) must equal the placeholder/field counts.
        part2_columns = self.data_columns['part2']['columns']
        part2_placeholder_count = self.count_placeholders(self.sql_statements['insert_pcb_test_records_details'])
        part2_fields = self.extract_fields(self.sql_statements['insert_pcb_test_records_details'])
        print(
            f"part2 columns: {len(part2_columns)}, placeholders: {part2_placeholder_count}, fields: {len(part2_fields)}")
        if len(part1_columns) + 1 != part1_placeholder_count or part1_placeholder_count != len(part1_fields):
            raise ValueError(
                f"part1配置中的列数({len(part1_columns)})加上1不等于占位符数量({part1_placeholder_count})或占位符数量不等于字段数({len(part1_fields)})")
        if len(part2_columns) + 3 != part2_placeholder_count or part2_placeholder_count != len(part2_fields):
            # BUGFIX: the message used to say "加上2" while the condition checks +3.
            raise ValueError(
                f"part2配置中的列数({len(part2_columns)})加上3不等于占位符数量({part2_placeholder_count})或占位符数量不等于字段数({len(part2_fields)})")

        df = pd.read_csv(file_path, encoding='GB2312', header=None, names=range(31))

        # Slice the configured columns for the master record (row index 1).
        part1_config = self.data_columns['part1']
        part1 = df.iloc[1, part1_config['columns']].fillna("")

        part2_config = self.data_columns['part2']
        part2_start = part2_config['start_row']

        # Guard against start_row falling past the end of the DataFrame.
        if part2_start >= len(df):
            print("part2_start超出DataFrame的行数范围，part2内容为空")
            return part1, None

        # Skip blank rows until the first non-blank row (or end of data).
        while pd.isna(df.iloc[part2_start, 0]):
            part2_start += 1
            if part2_start >= len(df):  # re-check the bound after advancing
                print("part2_start超出DataFrame的行数范围，part2内容为空")
                return part1, None

        part2 = df.iloc[part2_start:, part2_config['columns']].fillna("")
        if part2.empty:
            print("part2 内容为空，跳过主表和子表的插入操作")
            return part1, None

        return part1, part2

    def insert_into_db(self, part1, part2, last_order_id, urls):
        """Insert the master row (part1), then one detail row per part2 row.

        Each detail row is matched to an uploaded image URL by comparing the
        second-from-last ``_``-separated token of the URL to the row's column
        1 value. Rolls back the transaction on any error.
        """
        self.check_db_connection()  # make sure the connection is alive
        with self.db_connection.cursor() as cursor:
            try:
                values = tuple(part1[col] for col in self.data_columns['part1']['columns']) + (last_order_id,)
                print("开始执行主表插入")
                cursor.execute(self.sql_statements['insert_pcb_test_records'], values)
                print("主表执行成功")
                self.db_connection.commit()
                # Fetch the primary id of the row just inserted, for the FK.
                cursor.execute(self.select_last_primary_id)
                result1 = cursor.fetchone()
                print(result1)
                if result1:
                    last_primary_id = result1[0]
                    print(last_primary_id)
                    print("获取到主表id字段数据")
                else:
                    print("未在表中找到数据.")
                    return
                print("开始执行子表操作")
                for _, row in part2.iterrows():
                    # NOTE(review): this assignment is not read when building
                    # values1 below (the order id is appended explicitly).
                    row['smt_out_source_order'] = last_order_id
                    matched_url = ''
                    for url in urls:
                        parts = url.rsplit('_', 3)
                        if len(parts) > 2:
                            middle_number = parts[1]
                            if str(middle_number) == str(row[1]):  # compare as strings so types match
                                matched_url = url
                                break
                    row_dict = row.to_dict()
                    row_dict['url'] = matched_url
                    values1 = tuple(row_dict[col] for col in self.data_columns['part2']['columns']) + (
                        row_dict['url'], last_order_id, last_primary_id)
                    cursor.execute(self.sql_statements['insert_pcb_test_records_details'], values1)
                self.db_connection.commit()
                print("子表执行成功")
            except Exception as e:
                self.db_connection.rollback()
                print(f'Error inserting URL into database: {e}')

    def on_modified(self, event):
        """Watchdog callback: process every other ``.CSV`` modify event.

        Uses the module-level ``limit_upload_counter`` as a crude debounce
        (one event is processed, the next is skipped).
        """
        global limit_upload_counter
        print(limit_upload_counter)
        if not event.is_directory and event.src_path.endswith('.CSV'):
            if limit_upload_counter >= 1:
                limit_upload_counter = 0
                print(limit_upload_counter)
                return
            else:
                limit_upload_counter += 1
                print(limit_upload_counter)
                print(f'文件 {event.src_path} 发生变化.')
                time.sleep(0.3)
                # Process the most recently modified CSV, not necessarily the
                # one named in the event.
                latest_csv_file = self.get_latest_csv_file()
                if latest_csv_file:
                    print("处理文件")
                    part1, part2 = self.parse_csv(latest_csv_file)
                    if part1 is None or part2 is None:
                        return
                    self.check_db_connection()
                    with self.db_connection.cursor() as cursor:
                        cursor.execute(self.select_last_order_id_sql)
                        result = cursor.fetchone()
                        if result:
                            last_order_id = result[0]
                            print("获取到smt_out_source_order字段数据")
                            # Upload any images that appeared since the last
                            # snapshot of the JPEG directory.
                            self.new_images = set(os.listdir(self.JPG_DIR)) - self.initial_jpg_files
                            urls = []
                            for img in self.new_images:
                                img_path = os.path.join(self.JPG_DIR, img)
                                url = self.upload_to_minio(img_path)
                                urls.append(url)
                                print(f'图片上传至: {url}')
                            self.insert_into_db(part1, part2, last_order_id, urls)
                            self.initial_jpg_files.update(self.new_images)
                        else:
                            print("未在表中找到数据.")

    def upload_to_minio(self, file_path):
        """Upload *file_path* to the configured bucket; return its public URL.

        The S3 client is created lazily on first use and reused afterwards.
        """
        if not self.minio_client:
            self.minio_client = boto3.client(
                's3',
                endpoint_url=self.MINIO_ENDPOINT,
                aws_access_key_id=self.MINIO_ACCESS_KEY,
                aws_secret_access_key=self.MINIO_SECRET_KEY
            )
        file_name = os.path.basename(file_path)
        bucket_name = self.config['minio']['bucket']
        object_name = f"AOI/{file_name}"  # store under the AOI/ prefix
        self.minio_client.upload_file(file_path, bucket_name, object_name)
        return f'{self.MINIO_ENDPOINT}/{self.BUCKET_NAME}/{object_name}'


if __name__ == "__main__":
    # Load the YAML configuration that drives the handler.
    with open('config1.yml', 'r') as cfg_file:
        cfg = yaml.safe_load(cfg_file)

    watch_dir = cfg['directories']['csv_dir']
    handler = CSVFileHandler(cfg)

    # Watch only the CSV directory itself, not its subdirectories.
    watcher = Observer()
    watcher.schedule(handler, watch_dir, recursive=False)
    watcher.start()

    print(f'Starting to monitor {watch_dir}...')
    try:
        # Idle forever; the observer thread does the actual work.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
    watcher.join()
