import logging
import os
import requests
import threading
import time
from datetime import datetime
from json import dumps
from kafka import KafkaProducer
from kafka.errors import KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable

from health_check import start_health_check_server, update_component_status, increment_metric

# Logging configuration: file-based log under the server's log directory.
log_dir = '/root/server/price-server/logs'
# exist_ok avoids the check-then-create race the old exists()/makedirs pair had.
os.makedirs(log_dir, exist_ok=True)

logging.basicConfig(
    filename=os.path.join(log_dir, 'producer.log'),
    level=logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO')),
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Enumeration-style constants for the NowAPI metal ids
class MetalType:
    """Numeric ids used by the NowAPI gold-price endpoint for each metal."""

    GOLD_GRAM = 1301
    SILVER_GRAM = 1302
    PLATINUM_GRAM = 1303
    PALLADIUM_GRAM = 1304

    @staticmethod
    def get_name(type_id):
        """Return the constant name matching type_id, or "Unknown"."""
        matches = (
            attr
            for attr in dir(MetalType)
            if isinstance(getattr(MetalType, attr), int)
            and getattr(MetalType, attr) == type_id
        )
        return next(matches, "Unknown")

# Human-readable name per NowAPI metal id.
# NOTE(review): not referenced in this module — presumably used by importers; verify.
metal_name = {
    1301: 'gold',
    1302: 'silver',
    1303: 'platinum',
    1304: 'palladium',
}

# Configuration - read from environment variables
KAFKA_SERVERS = os.environ.get('KAFKA_SERVERS', 'kafka:9094').split(',')
TOPIC_PREFIX = os.environ.get('TOPIC_PREFIX', 'LIVE_PRICE_')
FETCH_INTERVAL = int(os.environ.get('FETCH_INTERVAL', 10))  # request interval, seconds (unused here — TODO confirm external use)
GOLD_FETCH_INTERVAL = int(os.environ.get('GOLD_FETCH_INTERVAL', 11))  # gold request interval, seconds
OTHER_METALS_FETCH_INTERVAL = int(os.environ.get('OTHER_METALS_FETCH_INTERVAL', 160))  # other-metals request interval, seconds
MAX_RETRIES = int(os.environ.get('MAX_RETRIES', 3))  # maximum retry attempts per fetch
HEALTH_CHECK_PORT = int(os.environ.get('HEALTH_CHECK_PORT', 8000))  # health-check HTTP port

# Global components, populated by init_system() / init_kafka_producer()
kafka_producer = None
gold_thread = None
other_metals_thread = None
health_check_thread = None
monitor_thread = None
health_server = None

class NowApiLivePriceDTO:
    """Dynamic container for one live-price record returned by the NowAPI.

    All keyword arguments become instance attributes verbatim; type_id and
    timestamp get defaults that callers may overwrite afterwards.
    """

    def __init__(self, **kwargs):
        # Defaults: unset metal id, ingestion time in epoch milliseconds.
        self.type_id = None
        self.timestamp = int(time.time() * 1000)
        # Promote every API field straight onto the instance.
        self.__dict__.update(kwargs)

def init_kafka_producer():
    """(Re)create the module-level Kafka producer.

    Closes any existing producer first, then builds a new KafkaProducer
    against KAFKA_SERVERS and probes it via metrics(). Reports the result
    to the health-check component status either way.

    Returns:
        bool: True if the producer was created and probed successfully,
        False on any failure (error is logged and the error metric bumped).
    """
    global kafka_producer
    try:
        # Close the existing producer, if any
        if kafka_producer is not None:
            try:
                kafka_producer.close()
                logging.info("Closed existing Kafka producer")
            except Exception as e:
                logging.error(f"Error closing existing Kafka producer: {e}")

        # Create a new producer
        kafka_producer = KafkaProducer(
            bootstrap_servers=KAFKA_SERVERS,
            # Reliability settings
            acks=1,  # integer 1 (leader ack), not the string '1'
            retries=2,
            retry_backoff_ms=100,
            
            # Performance settings
            # NOTE(review): batch_size=0 disables batching and buffer_memory=2048
            # is only 2 KiB — presumably tuned for minimal latency; confirm
            # messages fit, as oversized payloads would fail to buffer.
            batch_size=0,
            linger_ms=0,
            buffer_memory=2048,
            
            # Connection settings
            request_timeout_ms=10000,
            api_version_auto_timeout_ms=10000,
            connections_max_idle_ms=300000,  # 5 minutes
            max_block_ms=30000,
            metadata_max_age_ms=300000,  # 5 minutes
            
            # Reconnect settings
            reconnect_backoff_ms=100,
            reconnect_backoff_max_ms=1000,
            
            # Serialization settings
            key_serializer=lambda x: str(x).encode('utf-8'),
            value_serializer=lambda x: dumps(x).encode('utf-8'),
            security_protocol='PLAINTEXT'
        )
        
        # Probe the connection (metrics() touches broker metadata)
        kafka_producer.metrics()
        logging.info(f"Kafka producer initialized successfully with servers: {KAFKA_SERVERS}")
        update_component_status("kafka_producer", True)
        return True
    except Exception as e:
        logging.error(f"Failed to initialize Kafka producer: {e}")
        update_component_status("kafka_producer", False)
        increment_metric("errors")
        return False

def _parse_live_price(data, type_id):
    """Validate one NowAPI JSON payload and build a price DTO.

    Logs the specific validation failure and returns None when the payload
    is unusable; otherwise returns a populated NowApiLivePriceDTO with
    type_id set and timestamp derived from the record's uptime field.
    """
    if data.get("success") != "1":
        error_msg = data.get("msg", "Unknown error")
        logging.error(f"API returned error for type {type_id}: {error_msg}")
        return None

    result = data.get("result", {})
    if not result:
        logging.error(f"Empty result for type {type_id}")
        return None

    dt_list = result.get("dtList", {})
    if not dt_list:
        logging.error(f"Empty dtList for type {type_id}")
        return None

    live_price_data = dt_list.get(str(type_id), {})
    if not live_price_data:
        logging.error(f"No data found for type {type_id} in dtList")
        return None

    # All four fields must be present before we build the DTO.
    required_fields = ["last_price", "buy_price", "sell_price", "uptime"]
    missing_fields = [field for field in required_fields if field not in live_price_data]
    if missing_fields:
        logging.error(f"Missing required fields for type {type_id}: {missing_fields}")
        return None

    res = NowApiLivePriceDTO(**live_price_data)
    res.type_id = type_id
    try:
        # Prefer the exchange's own quote time over our ingestion time.
        res.timestamp = int(datetime.strptime(res.uptime, "%Y-%m-%d %H:%M:%S").timestamp() * 1000)
    except ValueError as e:
        # Include the exception detail (previously captured but never logged).
        logging.error(f"Invalid uptime format for type {type_id}: {res.uptime} ({e})")
        return None
    return res


def get_live_price_now_api(type_id, retries=MAX_RETRIES, retry_delay=5):
    """Fetch the live price for one metal from the NowAPI, with retries.

    Args:
        type_id: NowAPI metal id (see MetalType).
        retries: maximum number of attempts before giving up.
        retry_delay: seconds to sleep between attempts.

    Returns:
        NowApiLivePriceDTO on success, None after all attempts fail.
    """
    host = "https://sapi.k780.com/"
    # SECURITY: credentials are overridable via environment variables;
    # the hard-coded values remain only as backward-compatible defaults
    # and should be rotated out of source control.
    params = {
        "app": "finance.gold_price",
        "format": "json",
        "goldid": str(type_id),
        "appkey": os.environ.get("NOWAPI_APPKEY", "73174"),
        "sign": os.environ.get("NOWAPI_SIGN", "57f6c9fa2161fe9604d6e21fdb180f82"),
    }

    increment_metric("requests_total")

    for attempt in range(retries):
        try:
            logging.info(f"Requesting data for type {type_id} (attempt {attempt + 1}/{retries})")
            response = requests.get(host, params=params, timeout=10)
            response.raise_for_status()

            data = response.json()
            logging.debug(f"Raw response for type {type_id}: {data}")

            res = _parse_live_price(data, type_id)
            if res is not None:
                logging.info(f"Successfully retrieved data for type {type_id}: {res.__dict__}")
                return res
            # Validation failed; fall through to the retry delay below.

        except requests.RequestException as e:
            increment_metric("errors")
            logging.error(f"请求错误 (尝试 {attempt+1}/{retries}): {e}")
        except ValueError as e:
            increment_metric("errors")
            logging.error(f"解析JSON错误 (尝试 {attempt+1}/{retries}): {e}")
        except Exception as e:
            increment_metric("errors")
            logging.error(f"未知错误 (尝试 {attempt+1}/{retries}): {e}")

        if attempt < retries - 1:
            logging.info(f"Retrying in {retry_delay} seconds...")
            time.sleep(retry_delay)

    logging.error(f"Failed to get data for type {type_id} after {retries} attempts")
    return None

def publish_to_kafka(data, type_id):
    """Publish one price record to the per-metal Kafka topic.

    Sends asynchronously with success/error callbacks, then flushes with a
    short timeout so the message leaves the buffer between fetch cycles.
    On connection-class errors the producer is reinitialized.

    Args:
        data: object whose __dict__ is serialized as the message value.
        type_id: metal id; used as the message key and the topic suffix.

    Returns:
        bool: True if the send was handed to the producer, False otherwise.
    """
    if kafka_producer is None:
        logging.error("Kafka producer not initialized")
        if not init_kafka_producer():
            return False
    
    topic = f'{TOPIC_PREFIX}{type_id}'
    try:
        # Asynchronous send; delivery outcome is reported via callbacks.
        future = kafka_producer.send(
            topic,
            key=str(type_id),
            value=data.__dict__,
            timestamp_ms=int(time.time() * 1000)  # producer-side timestamp
        )
        
        def on_send_success(record_metadata):
            logging.debug(f"Data published to Kafka topic {topic}: partition={record_metadata.partition}, offset={record_metadata.offset}")
            # BUGFIX: count only confirmed deliveries. This call was previously
            # mis-indented outside the callback, so it ran unconditionally at
            # send time even when delivery later failed.
            increment_metric("kafka_messages_sent")
            
        def on_send_error(excp):
            logging.error(f"Failed to publish data to Kafka topic {topic}: {excp}")
            increment_metric("errors")
            if isinstance(excp, (KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable)):
                logging.warning("Connection error detected, attempting to reinitialize Kafka producer")
                init_kafka_producer()
        
        future.add_callback(on_send_success).add_errback(on_send_error)
        
        # Short flush to push the message out promptly.
        kafka_producer.flush(timeout=1)
        return True
    except Exception as e:
        logging.error(f"Failed to publish data to Kafka topic {topic}: {e}")
        increment_metric("errors")
        if isinstance(e, (KafkaTimeoutError, KafkaConnectionError, NoBrokersAvailable)):
            logging.warning("Connection error detected, attempting to reinitialize Kafka producer")
            init_kafka_producer()
        return False

def process_new_data(live_price, type_id):
    """Hand a freshly fetched price record off to Kafka, logging the outcome."""
    if live_price is None:
        return

    # Publish to Kafka; publish_to_kafka returns True on success.
    published = publish_to_kafka(live_price, type_id)
    if published:
        logging.info(f"Successfully processed data for type {type_id}")
    else:
        logging.error(f"Failed to process data for type {type_id}")

def request_gold():
    """Worker loop: fetch the gold price every GOLD_FETCH_INTERVAL seconds."""
    while True:
        try:
            data = get_live_price_now_api(MetalType.GOLD_GRAM)
            if data is not None:
                process_new_data(data, MetalType.GOLD_GRAM)
            else:
                logging.warning("Failed to get gold price data, will retry in next interval")
        except Exception as e:
            # Never let the worker die on an unexpected error.
            logging.error(f"Error in gold request thread: {e}")
            increment_metric("errors")
        time.sleep(GOLD_FETCH_INTERVAL)

def request_other_metals():
    """Worker loop: poll silver/platinum/palladium at the slow cadence."""
    non_gold = (MetalType.SILVER_GRAM, MetalType.PLATINUM_GRAM, MetalType.PALLADIUM_GRAM)
    while True:
        try:
            for type_id in non_gold:
                data = get_live_price_now_api(type_id)
                if data is not None:
                    process_new_data(data, type_id)
                else:
                    logging.warning(f"Failed to get price data for type {type_id}, will retry in next interval")
        except Exception as e:
            # Never let the worker die on an unexpected error.
            logging.error(f"Error in other metals request thread: {e}")
            increment_metric("errors")
        time.sleep(OTHER_METALS_FETCH_INTERVAL)

def check_and_restart_threads():
    """Supervisor loop: revive dead worker threads and the Kafka producer.

    Runs every 30 seconds. Threads are daemons, so a crash in a worker
    simply leaves a dead Thread object that this loop replaces.
    """
    global gold_thread, other_metals_thread
    while True:
        try:
            # Revive the gold fetch thread when missing or dead.
            if not (gold_thread and gold_thread.is_alive()):
                logging.warning("Gold thread died or not started, restarting...")
                gold_thread = threading.Thread(target=request_gold, daemon=True)
                gold_thread.start()
                logging.info("Gold thread restarted")

            # Revive the other-metals fetch thread when missing or dead.
            if not (other_metals_thread and other_metals_thread.is_alive()):
                logging.warning("Other metals thread died or not started, restarting...")
                other_metals_thread = threading.Thread(target=request_other_metals, daemon=True)
                other_metals_thread.start()
                logging.info("Other metals thread restarted")

            # Recreate the producer if it was never (or unsuccessfully) built.
            if kafka_producer is None:
                logging.warning("Kafka producer not initialized, attempting to reinitialize...")
                init_kafka_producer()

        except Exception as e:
            logging.error(f"Error in thread monitor: {e}")
            increment_metric("errors")
        time.sleep(30)

def health_check():
    """Report Kafka producer liveness to the health endpoint every 10 s."""
    while True:
        try:
            # A non-None producer counts as healthy, matching the simple
            # existence check used elsewhere in this module.
            update_component_status("kafka_producer", kafka_producer is not None)
        except Exception as e:
            logging.error(f"Error in health check: {e}")
            increment_metric("errors")
        time.sleep(10)

def init_system():
    """Bring up the Kafka producer, health server, and all worker threads.

    Returns:
        bool: True when everything started; False if the Kafka producer
        could not be initialized (nothing else is started in that case).
    """
    global gold_thread, other_metals_thread, health_check_thread, monitor_thread, health_server

    # Kafka must be reachable before anything else starts.
    if not init_kafka_producer():
        logging.error("Failed to initialize Kafka producer")
        return False

    # HTTP health/metrics endpoint.
    health_server = start_health_check_server(HEALTH_CHECK_PORT)

    # Spawn each background worker as a daemon thread, starting it right away.
    gold_thread = threading.Thread(target=request_gold, daemon=True)
    gold_thread.start()
    other_metals_thread = threading.Thread(target=request_other_metals, daemon=True)
    other_metals_thread.start()
    health_check_thread = threading.Thread(target=health_check, daemon=True)
    health_check_thread.start()
    monitor_thread = threading.Thread(target=check_and_restart_threads, daemon=True)
    monitor_thread.start()

    logging.info("All threads started successfully")
    return True

def main():
    """Start the price server and block until interrupted."""
    logging.info("Starting price server...")
    try:
        if not init_system():
            logging.error("Failed to initialize system")
        else:
            logging.info("System initialized successfully")
            # Keep the main thread alive; all workers are daemon threads.
            while True:
                time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Received shutdown signal")
    except Exception as e:
        logging.error(f"Unexpected error in main thread: {e}")
    finally:
        # Best-effort cleanup of the producer on the way out.
        if kafka_producer:
            try:
                kafka_producer.close()
                logging.info("Kafka producer closed")
            except Exception as e:
                logging.error(f"Error closing Kafka producer: {e}")

# Script entry point: run the producer until interrupted.
if __name__ == "__main__":
    main()