import logging
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime

import psycopg2
from psycopg2 import sql
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from obs import ObsClient  # 华为云OBS SDK

# Configuration (placeholder credentials — replace before deployment)
DB_CONFIG = {
    'dbname': 'your_dbname',
    'user': 'your_username',
    'password': 'your_password',
    'host': 'localhost',
    'port': '5432'
}

OBS_CONFIG = {
    'access_key': 'your_ak',
    'secret_key': 'your_sk',
    'server': 'https://your.obs-endpoint.com',
    'bucket': 'your-bucket-name',
    'prefix': 'data/'  # OBS key prefix: only objects under this prefix are tracked
}

# Logging: mirror everything to a file and to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("file_processor.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger()

# Non-blocking locks so an overlapping scheduled run of the same task
# is skipped instead of executing twice concurrently
fetch_lock = threading.Lock()
process_lock = threading.Lock()

# Create the tracking table
def create_table():
    """Create (or verify) the ``file_processing`` tracking table.

    One row per OBS object key; ``is_processed`` drives the processing
    queue. Errors are logged, not raised — callers proceed regardless.
    """
    conn = None
    cursor = None
    try:
        conn = psycopg2.connect(**DB_CONFIG)
        cursor = conn.cursor()
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS file_processing (
                file_name TEXT PRIMARY KEY,
                is_processed BOOLEAN NOT NULL DEFAULT FALSE,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.commit()
        logger.info("Table created/verified successfully")
    except Exception as e:
        logger.error(f"Error creating table: {str(e)}")
    finally:
        # Bug fix: if connect() itself raised, `conn` was never bound and
        # the previous `if conn:` check crashed with NameError; the cursor
        # was also only closed when the connection existed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

# List object keys in the OBS bucket
def get_obs_files():
    """Return the object keys under ``OBS_CONFIG['prefix']`` in the bucket.

    Retries up to 3 times with exponential backoff (1s, 2s). Returns an
    empty list when every attempt fails, so callers never see an exception.

    NOTE(review): listObjects returns at most one page of results; buckets
    with more keys than the page size would need marker-based pagination —
    confirm against expected bucket sizes.
    """
    max_retries = 3
    for attempt in range(max_retries):
        obs_client = None
        try:
            obs_client = ObsClient(
                access_key_id=OBS_CONFIG['access_key'],
                secret_access_key=OBS_CONFIG['secret_key'],
                server=OBS_CONFIG['server']
            )
            resp = obs_client.listObjects(
                OBS_CONFIG['bucket'],
                prefix=OBS_CONFIG['prefix']
            )
            if resp.status < 300:
                return [content.key for content in resp.body.contents]
            else:
                logger.error(f"OBS list error: {resp.errorCode}")
                raise Exception(f"OBS error: {resp.errorCode}")
        except Exception as e:
            logger.error(f"Attempt {attempt + 1} failed: {str(e)}")
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # exponential backoff
            else:
                logger.error("All OBS connection attempts failed")
                return []
        finally:
            # Bug fix: the client was never closed, leaking an HTTP
            # connection pool on every call (and on every retry).
            if obs_client is not None:
                obs_client.close()

# Store newly discovered files in the database
def fetch_and_store_files():
    """Discover OBS objects and register unseen keys as unprocessed rows.

    Skips (with a warning) if a previous fetch run is still in progress —
    the lock is non-blocking by design. All errors are logged, not raised.
    """
    if not fetch_lock.acquire(blocking=False):
        logger.warning("Fetch task already running. Skipping...")
        return

    conn = None
    cursor = None
    try:
        logger.info("Starting file fetch task...")
        obs_files = get_obs_files()

        if not obs_files:
            logger.warning("No files found in OBS")
            return

        conn = psycopg2.connect(**DB_CONFIG)
        cursor = conn.cursor()

        # Keys already tracked in the table
        cursor.execute("SELECT file_name FROM file_processing")
        db_files = {row[0] for row in cursor.fetchall()}

        # Insert only unseen keys; ON CONFLICT guards against races with a
        # concurrent writer between the SELECT above and this INSERT
        new_files = [f for f in obs_files if f not in db_files]
        for file_name in new_files:
            try:
                cursor.execute(
                    "INSERT INTO file_processing (file_name, is_processed) "
                    "VALUES (%s, FALSE) ON CONFLICT (file_name) DO NOTHING",
                    (file_name,)
                )
                logger.info(f"Added new file: {file_name}")
            except Exception as e:
                logger.error(f"Error inserting {file_name}: {str(e)}")
                # Bug fix: in PostgreSQL a failed statement aborts the
                # transaction, so without a rollback every later INSERT in
                # this loop would also fail. Rolling back discards earlier
                # uncommitted inserts of this batch, but they remain "new"
                # and are re-inserted on the next hourly run.
                conn.rollback()

        conn.commit()
        logger.info(f"Fetch task completed. New files: {len(new_files)}")

    except Exception as e:
        logger.error(f"Fetch task failed: {str(e)}", exc_info=True)
    finally:
        # None-init instead of the fragile `'x' in locals()` checks
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
        fetch_lock.release()

# Download and process a single file
def process_file(file_name):
    """Download one OBS object, process it, and mark it processed.

    Retries up to 3 times (5s pause between attempts). Returns True on
    success, False after all attempts fail. Runs on worker threads, so it
    opens its own short-lived DB connection and OBS client per attempt.
    """
    max_retries = 3
    for attempt in range(max_retries):
        # Per-attempt None-init: the old `'x' in locals()` cleanup could see
        # an already-closed connection left over from a previous attempt.
        obs_client = None
        conn = None
        cursor = None
        try:
            logger.info(f"Processing {file_name} (attempt {attempt + 1})")
            obs_client = ObsClient(
                access_key_id=OBS_CONFIG['access_key'],
                secret_access_key=OBS_CONFIG['secret_key'],
                server=OBS_CONFIG['server']
            )

            # Download into the local working directory
            resp = obs_client.getObject(
                OBS_CONFIG['bucket'],
                file_name,
                downloadPath=f"./downloads/{os.path.basename(file_name)}"
            )

            if resp.status < 300:
                # Add your real processing logic here
                logger.info(f"Downloaded {file_name}, size: {resp.body.size} bytes")

                # Simulate time-consuming processing
                time.sleep(10)

                # Mark as processed; also refresh last_updated, which the
                # schema defines but the old UPDATE never touched.
                conn = psycopg2.connect(**DB_CONFIG)
                cursor = conn.cursor()
                cursor.execute(
                    "UPDATE file_processing "
                    "SET is_processed = TRUE, last_updated = CURRENT_TIMESTAMP "
                    "WHERE file_name = %s",
                    (file_name,)
                )
                conn.commit()
                logger.info(f"Successfully processed {file_name}")
                return True
            else:
                logger.error(f"Download failed for {file_name}: {resp.errorCode}")
                raise Exception(f"OBS error: {resp.errorCode}")

        except Exception as e:
            logger.error(f"Processing failed for {file_name}: {str(e)}")
            if attempt < max_retries - 1:
                time.sleep(5)
            else:
                logger.error(f"All processing attempts failed for {file_name}")
                return False
        finally:
            if cursor is not None:
                cursor.close()
            if conn is not None:
                conn.close()
            # Bug fix: the OBS client was never closed — one leaked
            # connection pool per attempt.
            if obs_client is not None:
                obs_client.close()

# Multi-threaded processing task
def process_files_task():
    """Process up to 50 pending files concurrently on 5 worker threads.

    Skips (with a warning) if a previous processing run is still active.
    Each worker (``process_file``) commits its own result, so a crash here
    never loses completed work.
    """
    if not process_lock.acquire(blocking=False):
        logger.warning("Process task already running. Skipping...")
        return

    conn = None
    cursor = None
    try:
        logger.info("Starting file processing task...")
        conn = psycopg2.connect(**DB_CONFIG)
        cursor = conn.cursor()
        cursor.execute(
            "SELECT file_name FROM file_processing "
            "WHERE is_processed = FALSE LIMIT 50"  # cap the batch per run
        )
        unprocessed_files = [row[0] for row in cursor.fetchall()]

        if not unprocessed_files:
            logger.info("No unprocessed files found")
            return

        # Fan the batch out across a small thread pool (import hoisted to
        # the top of the file with the other imports)
        with ThreadPoolExecutor(max_workers=5) as executor:
            results = list(executor.map(process_file, unprocessed_files))

        success_count = sum(1 for r in results if r)
        logger.info(f"Processing task completed. Success: {success_count}/{len(unprocessed_files)}")

    except Exception as e:
        logger.error(f"Processing task failed: {str(e)}", exc_info=True)
    finally:
        # None-init instead of the fragile `'x' in locals()` checks
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
        process_lock.release()

# Scheduled task orchestration
def schedule_tasks():
    """Start the background scheduler and block until interrupted.

    Two interval jobs are registered, each kicked off immediately via
    ``next_run_time``: an hourly OBS fetch and a 4-hourly processing pass.
    Blocks the main thread in a sleep loop; Ctrl-C shuts the scheduler down.
    """
    scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})

    # Fetch new files every hour (first run: now)
    scheduler.add_job(fetch_and_store_files, 'interval',
                      hours=1, next_run_time=datetime.now())
    # Process pending files every 4 hours (first run: now)
    scheduler.add_job(process_files_task, 'interval',
                      hours=4, next_run_time=datetime.now())

    scheduler.start()
    logger.info("Scheduler started")

    # Keep the main thread alive; BackgroundScheduler runs on daemon threads
    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
        logger.info("Scheduler stopped")


if __name__ == "__main__":
    # Initialize the environment: local download directory + tracking table
    os.makedirs("./downloads", exist_ok=True)
    create_table()

    # Start the scheduled tasks (blocks until interrupted)
    schedule_tasks()