import logging
from logging.handlers import RotatingFileHandler
import os
import asyncio

# TLV wire format: Type (1 byte) + Length (2 bytes) + Value (n bytes)
# Message type definitions
MSG_TYPE_REGISTER = 0x01  # registration request
MSG_TYPE_REGISTER_RESP = 0x02  # registration response
MSG_TYPE_CONFIG_UPDATE = 0x03  # configuration update
MSG_TYPE_SESSION_NEW = 0x04      # new session
MSG_TYPE_SESSION_CLOSE = 0x05    # session close
MSG_TYPE_SESSION_DATA = 0x06      # session payload data
MSG_TYPE_SESSION_CONNECTED = 0x07  # session connection acknowledgement
MSG_TYPE_HEARTBEAT = 0x08  # keep-alive

class TLVError(Exception):
    """Raised on any TLV encode/decode or stream-framing failure."""
    pass

def encode_tlv(type_, value):
    """Encode a message into TLV wire format.

    Args:
        type_: message type, a single byte (0-255).
        value: payload as a bytes-like object (bytes, bytearray or
            memoryview), at most 65535 bytes.

    Returns:
        bytes: Type (1 byte) + Length (2 bytes, big-endian) + Value.

    Raises:
        TLVError: if type_ is out of range, value is not bytes-like,
            or value does not fit the 2-byte length field.
    """
    if not isinstance(type_, int) or not 0 <= type_ <= 0xFF:
        raise TLVError("Type must be a byte (0-255)")
    # Validate the payload type up front so callers get the module's
    # TLVError instead of an opaque TypeError from the concatenation below.
    if not isinstance(value, (bytes, bytearray, memoryview)):
        raise TLVError("Value must be a bytes-like object")
    if len(value) > 0xFFFF:
        raise TLVError("Value too long (max 65535 bytes)")
    type_byte = type_.to_bytes(1, byteorder='big')
    length_bytes = len(value).to_bytes(2, byteorder='big')
    # bytes(value) normalizes bytearray/memoryview so the result is bytes.
    return type_byte + length_bytes + bytes(value)

async def decode_tlv(reader):
    """Read and decode a single TLV frame from *reader*.

    Args:
        reader: object with an async ``read(n)`` method (e.g. an
            asyncio StreamReader), consumed via read_exactly.

    Returns:
        tuple: (type_, value) where type_ is an int (0-255) and value
        holds the raw payload bytes.

    Raises:
        TLVError: propagated from read_exactly on a truncated stream,
            timeout, or underlying read error.
    """
    # Type field: 1 byte. read_exactly returns exactly the requested
    # number of bytes or raises TLVError, so the short-read checks the
    # original carried here were unreachable and have been dropped.
    type_bytes = await read_exactly(reader, 1)
    type_ = int.from_bytes(type_bytes, byteorder='big')
    # Length field: 2 bytes, big-endian. A 2-byte field inherently bounds
    # the payload at 65535 bytes — the same limit encode_tlv enforces.
    # The previous 60000 cap rejected frames the encoder could legally
    # produce, so it has been removed.
    length_bytes = await read_exactly(reader, 2)
    length = int.from_bytes(length_bytes, byteorder='big')
    # Value field: exactly `length` bytes.
    value = await read_exactly(reader, length)
    return type_, value

async def read_exactly(reader, n):
    """Read exactly *n* bytes from *reader*.

    Args:
        reader: object with an async ``read(n)`` method (e.g. an
            asyncio StreamReader).
        n: number of bytes required; 0 returns an empty buffer.

    Returns:
        bytearray: exactly n bytes read from the stream.

    Raises:
        TLVError: if the connection closes before n bytes arrive, on a
            read timeout, or on any other underlying read error.
    """
    buffer = bytearray()
    while len(buffer) < n:
        try:
            chunk = await reader.read(n - len(buffer))
        except asyncio.TimeoutError:
            raise TLVError(f"Timeout after {len(buffer)}/{n} bytes read")
        except Exception as e:
            raise TLVError(f"Error reading data: {str(e)}") from e
        # The EOF check sits OUTSIDE the try block. In the original it was
        # inside, so the TLVError it raised was immediately caught by the
        # broad `except Exception` above and re-wrapped with a misleading
        # "Error reading data" message and a spurious exception chain.
        if not chunk:
            raise TLVError(f"Connection closed prematurely after reading {len(buffer)}/{n} bytes")
        buffer.extend(chunk)
    return buffer


def get_logger(tag="app"):
    """Create (or return the already-configured) logger named *tag*.

    Logs DEBUG and above to the console, and INFO and above to a
    rotating file ``<module dir>/<tag>.log`` (10 MB per file, up to
    5 backups). Idempotent: a logger that already has handlers is
    returned unchanged, so repeated calls never stack duplicate
    handlers.

    Args:
        tag: logger name; also used as the log file's base name.

    Returns:
        logging.Logger: the configured logger instance.
    """
    log = logging.getLogger(tag)
    if log.handlers:
        # Already configured by a previous call — return as-is.
        return log
    log.setLevel(logging.DEBUG)
    # Do not bubble records up to the root logger: if the root also has
    # handlers (e.g. after logging.basicConfig), every message would
    # otherwise be emitted twice.
    log.propagate = False
    formatter = logging.Formatter(
        "[%(asctime)s]-[%(filename)s:%(lineno)d] %(levelname)-4s: %(message)s")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    log.addHandler(console_handler)
    # File log lives next to this module.
    log_dir = os.path.dirname(os.path.abspath(__file__))
    log_file = os.path.join(log_dir, f'{tag}.log')
    # Rotating handler caps disk usage instead of growing unbounded.
    file_handler = RotatingFileHandler(
        log_file,
        maxBytes=10 * 1024 * 1024,  # 10MB per file
        backupCount=5,              # Keep up to 5 backup logs
        encoding='utf-8',
    )
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    log.addHandler(file_handler)
    log.info("logger file: %s", log_file)
    return log
