import logging
import os
import requests
import threading
import time
from json import dumps
from kafka import KafkaProducer
from kafka.errors import KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable

from health_check import start_health_check_server, update_component_status, increment_metric

# Logging configuration: write to a file under /app/logs, creating the
# directory on first run (e.g. inside a fresh container volume).
log_dir = '/app/logs'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

logging.basicConfig(
    filename=os.path.join(log_dir, 'rtj_producer.log'),
    # LOG_LEVEL accepts standard level names ('DEBUG', 'INFO', 'WARNING', ...).
    level=logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO')),
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Configuration - read from environment variables, with defaults
KAFKA_SERVERS = os.environ.get('KAFKA_SERVERS', 'kafka:9094').split(',')
TOPIC_NAME = 'LIVE_PRICE_RTJDATA'
FETCH_INTERVAL = int(os.environ.get('FETCH_INTERVAL', 1))  # seconds between price fetches
MAX_RETRIES = int(os.environ.get('MAX_RETRIES', 3))  # max HTTP retry attempts per fetch
HEALTH_CHECK_PORT = int(os.environ.get('HEALTH_CHECK_PORT', 8001))  # health-check HTTP port

# Global components, populated by init_system() / the monitor thread
kafka_producer = None
request_thread = None
health_check_thread = None
monitor_thread = None
health_server = None

class RTJPriceDTO:
    """Lightweight DTO for a real-time price snapshot.

    Stamps the instance with its creation time in epoch milliseconds and
    copies every keyword argument onto the instance as an attribute.
    """

    def __init__(self, **fields):
        # Creation time in epoch milliseconds.
        self.timestamp = int(time.time() * 1000)
        self.__dict__.update(fields)

def init_kafka_producer():
    """(Re)create the module-level KafkaProducer.

    Closes any existing producer first, builds a new one, verifies it by
    fetching client metrics, and reports the outcome to the health-check
    component. Returns True on success, False on any failure.
    """
    global kafka_producer
    try:
        # Close the existing producer, if any, before replacing it.
        if kafka_producer is not None:
            try:
                kafka_producer.close()
                logging.info("Closed existing Kafka producer")
            except Exception as e:
                logging.error(f"Error closing existing Kafka producer: {e}")

        # Create the new producer
        kafka_producer = KafkaProducer(
            bootstrap_servers=KAFKA_SERVERS,
            # Reliability settings
            acks=1,  # leader acknowledgement is sufficient
            retries=3,  # retry failed sends
            retry_backoff_ms=500,  # delay between send retries
            
            # Performance / batching settings
            batch_size=16384,  # 16KB
            linger_ms=50,# wait up to 50ms to allow batched sends
            buffer_memory=33554432,  # 32MB
            
            # Connection settings
            request_timeout_ms=30000,  # generous request timeout
            api_version_auto_timeout_ms=30000,
            connections_max_idle_ms=600000,  # 10 minutes
            max_block_ms=60000,  # 1 minute
            metadata_max_age_ms=600000,  # 10 minutes
            
            # Reconnect settings
            reconnect_backoff_ms=1000,  # initial reconnect backoff
            reconnect_backoff_max_ms=5000,  # cap on reconnect backoff
            
            # Serialization: keys as UTF-8 strings, values as JSON
            key_serializer=lambda x: str(x).encode('utf-8'),
            value_serializer=lambda x: dumps(x).encode('utf-8'),
            security_protocol='PLAINTEXT'
        )
        
        # Probe the client; metrics() raises if the producer is unusable.
        kafka_producer.metrics()
        logging.info(f"Kafka producer initialized successfully with servers: {KAFKA_SERVERS}")
        update_component_status("rtj_kafka_producer", True)
        return True
    except Exception as e:
        logging.error(f"Failed to initialize Kafka producer: {e}")
        update_component_status("rtj_kafka_producer", False)
        increment_metric("errors")
        return False

def get_rtj_price(retries=MAX_RETRIES, retry_delay=1):
    """Fetch the latest RTJ precious-metal prices from the upstream HTTP API.

    Makes up to ``retries`` attempts, sleeping ``retry_delay`` seconds between
    them. Returns a dict in RtjDataPriceMessage shape:
    ``{"data": {code: {"code", "askprice", "bidprice", "high", "low"}}}``,
    or None if every attempt fails.
    """
    url = "http://goldapi.szzjtech.cn/bfln.php"

    increment_metric("requests_total")

    for attempt in range(retries):
        try:
            logging.debug(f"Requesting RTJ price data (attempt {attempt + 1}/{retries})")
            response = requests.get(url, timeout=5)
            response.raise_for_status()

            data = response.json()
            logging.debug(f"Raw response: {data}")

            # Application-level status check: the API can return HTTP 200
            # while signalling an error in its own "code" field.
            if data.get("code") != 200:
                error_msg = data.get("msg", "Unknown error")
                logging.error(f"API returned error: {error_msg}")
                # Count API-level failures in the errors metric, consistent
                # with the transport/parse exception paths below.
                increment_metric("errors")
                if attempt < retries - 1:
                    time.sleep(retry_delay)
                    continue
                return None

            result = data.get("data", {}).get("items", {})
            if not result:
                logging.error("Empty result from API")
                increment_metric("errors")
                if attempt < retries - 1:
                    time.sleep(retry_delay)
                    continue
                return None

            # Re-key the payload by instrument code (not numeric index) so it
            # matches the RtjDataPriceMessage format expected downstream.
            price_data = {
                "data": {
                    item.get("code", ""): {
                        "code": item.get("code", ""),
                        "askprice": item.get("askprice", "0"),
                        "bidprice": item.get("bidprice", "0"),
                        "high": item.get("high", "0"),
                        "low": item.get("low", "0")
                    }
                    for item in result.values()  # iterate item dicts directly
                }
            }

            logging.info("Successfully retrieved RTJ price data")
            return price_data

        except requests.RequestException as e:
            increment_metric("errors")
            logging.error(f"请求错误 (尝试 {attempt+1}/{retries}): {e}")
        except ValueError as e:
            # response.json() raises a ValueError subclass on malformed JSON.
            increment_metric("errors")
            logging.error(f"解析JSON错误 (尝试 {attempt+1}/{retries}): {e}")
        except Exception as e:
            increment_metric("errors")
            logging.error(f"未知错误 (尝试 {attempt+1}/{retries}): {e}")

        if attempt < retries - 1:
            logging.info(f"Retrying in {retry_delay} seconds...")
            time.sleep(retry_delay)

    logging.error(f"Failed to get RTJ price data after {retries} attempts")
    return None

def publish_to_kafka(data):
    """Queue a price-data dict for asynchronous publication to TOPIC_NAME.

    Lazily (re)initializes the producer if missing, attaches delivery
    callbacks to the send future, and flushes the producer every 10th
    message. Returns True once the send is queued (delivery itself is
    confirmed later by the callbacks), False if queueing fails.
    """
    if kafka_producer is None:
        logging.error("Kafka producer not initialized")
        if not init_kafka_producer():
            return False
    
    try:
        # `data` is already a plain dict in the target message format; the
        # producer's value_serializer JSON-encodes it (no __dict__ access).
        future = kafka_producer.send(
            TOPIC_NAME,
            key='rtj_data',
            value=data,  # already dict-shaped, use as-is
            timestamp_ms=int(time.time() * 1000)
        )
        
        # Asynchronous delivery callbacks
        def on_send_success(record_metadata):
            logging.info(f"Successfully published to Kafka - Topic: {TOPIC_NAME}, Partition: {record_metadata.partition}, Offset: {record_metadata.offset}")
            increment_metric("kafka_messages_sent")
            
        def on_send_error(excp):
            logging.error(f"Failed to publish to Kafka topic {TOPIC_NAME}: {excp}")
            increment_metric("errors")
            if isinstance(excp, (KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable)):
                # NOTE(review): nothing here resets kafka_producer to None, so the
                # promised reinitialization actually relies on health_check()'s
                # periodic metrics() probe failing — confirm this is intended.
                logging.warning("Connection error detected, will attempt to reinitialize Kafka producer in next iteration")
        
        future.add_callback(on_send_success).add_errback(on_send_error)
        
        # Flush every 10th message instead of on every send, to keep
        # batching effective (message_count starts unset, so the very
        # first send also flushes).
        if getattr(publish_to_kafka, 'message_count', 0) % 10 == 0:
            kafka_producer.flush(timeout=5)
            logging.info(f"Flushed Kafka producer buffer after {getattr(publish_to_kafka, 'message_count', 0)} messages")
        publish_to_kafka.message_count = getattr(publish_to_kafka, 'message_count', 0) + 1
        
        return True
    except Exception as e:
        logging.error(f"Failed to publish data to Kafka topic {TOPIC_NAME}: {e}")
        increment_metric("errors")
        if isinstance(e, (KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable)):
            logging.warning("Connection error detected, will attempt to reinitialize Kafka producer in next iteration")
        return False

def process_new_data(price_data):
    """Forward a freshly fetched price snapshot to Kafka; a None snapshot is ignored."""
    if price_data is None:
        return

    queued = publish_to_kafka(price_data)
    if queued:
        logging.info("RTJ price data queued for Kafka publishing")
    else:
        logging.error("Failed to queue RTJ price data for Kafka publishing")

def request_rtj_price():
    """Worker loop: fetch RTJ prices every FETCH_INTERVAL seconds, forever."""
    while True:
        try:
            price_data = get_rtj_price()
            if price_data is not None:
                process_new_data(price_data)
            else:
                logging.warning("Failed to get RTJ price data, will retry in next interval")
        except Exception as e:
            logging.error(f"Error in RTJ price request thread: {e}")
            increment_metric("errors")
        time.sleep(FETCH_INTERVAL)

def check_and_restart_threads():
    """Watchdog loop: every 30s, revive the fetch thread and the Kafka producer."""
    global request_thread
    while True:
        try:
            # Restart the fetch thread if it never started or has died.
            fetcher_alive = request_thread is not None and request_thread.is_alive()
            if not fetcher_alive:
                logging.warning("RTJ price request thread died or not started, restarting...")
                request_thread = threading.Thread(target=request_rtj_price, daemon=True)
                request_thread.start()
                logging.info("RTJ price request thread restarted")

            # Recreate the Kafka producer if it is missing.
            if kafka_producer is None:
                logging.warning("Kafka producer not initialized, attempting to reinitialize...")
                init_kafka_producer()

        except Exception as e:
            logging.error(f"Error in thread monitor: {e}")
            increment_metric("errors")
        time.sleep(30)

def health_check():
    """Health loop: probe the Kafka producer and report component status.

    Sleeps in short 5-second steps so the thread stays responsive, but
    only performs the actual probe once every 30 seconds.
    """
    last_probe = 0
    probe_interval = 30  # seconds between real producer probes

    while True:
        try:
            now = time.time()
            if now - last_probe >= probe_interval:
                if kafka_producer is None:
                    update_component_status("rtj_kafka_producer", False)
                    init_kafka_producer()  # try to bring the producer back
                else:
                    try:
                        kafka_producer.metrics()  # raises if the client is unusable
                        update_component_status("rtj_kafka_producer", True)
                    except Exception:
                        update_component_status("rtj_kafka_producer", False)
                        init_kafka_producer()  # probe failed: rebuild the producer
                last_probe = now
        except Exception as e:
            logging.error(f"Error in health check: {e}")
            increment_metric("errors")
        time.sleep(5)  # short sleep; probe cadence is governed by last_probe

def init_system():
    """Bring up the Kafka producer, health server, and worker threads.

    Returns True when everything started; returns False if the Kafka
    producer could not be created, in which case nothing else is started.
    """
    global request_thread, health_check_thread, monitor_thread, health_server

    # The Kafka producer is a hard requirement; bail out early without it.
    if not init_kafka_producer():
        logging.error("Failed to initialize Kafka producer")
        return False

    # HTTP endpoint exposing liveness and metrics.
    health_server = start_health_check_server(HEALTH_CHECK_PORT)

    # Create the daemon worker threads, then start them all.
    request_thread = threading.Thread(target=request_rtj_price, daemon=True)
    health_check_thread = threading.Thread(target=health_check, daemon=True)
    monitor_thread = threading.Thread(target=check_and_restart_threads, daemon=True)

    for worker in (request_thread, health_check_thread, monitor_thread):
        worker.start()

    logging.info("All threads started successfully")
    return True

def main():
    """Entry point: initialize the system, then idle until shutdown."""
    logging.info("Starting RTJ price producer...")
    try:
        if not init_system():
            logging.error("Failed to initialize system")
        else:
            logging.info("System initialized successfully")
            # All work happens in daemon threads; just keep the process alive.
            while True:
                time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Received shutdown signal")
    except Exception as e:
        logging.error(f"Unexpected error in main thread: {e}")
    finally:
        # Best-effort cleanup of the producer on the way out.
        if kafka_producer:
            try:
                kafka_producer.close()
                logging.info("Kafka producer closed")
            except Exception as e:
                logging.error(f"Error closing Kafka producer: {e}")

if __name__ == "__main__":
    main()