import os
import logging
import torch
from fusion_vrp.models import PositionEncoder, ActorDecoder, CriticEncoder, PointerNet, TransformerTSP, BertEncoder
from fusion_vrp.training import (
    plot_training_results, hierarchical_train,
    pointer_train_on_groups, pointer_validate_and_plot
)
from fusion_vrp.training.transformer_tsp import transformer_train_on_groups, transformer_validate_and_plot
from fusion_vrp.config import TrainingConfig
from scripts.draw_gantt import draw_gantt_from_results

# Root logger for the whole run: timestamped INFO-level console output.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def _as_int_list(values):
    """Best-effort conversion of a tensor or iterable into a flat list of ints.

    A ``torch.Tensor`` is detached, moved to CPU, flattened if multi-dimensional
    and converted element-wise. Any other iterable is converted per item; items
    that cannot be coerced to ``int`` are skipped silently (this feeds a
    best-effort text log, so we never raise here).
    """
    if isinstance(values, torch.Tensor):
        flat = values.detach().cpu()
        if flat.dim() > 1:
            flat = flat.view(-1)
        # Store as ints for logging to avoid float display/rounding issues.
        return [int(x.item()) for x in flat]
    out = []
    for item in values:
        try:
            out.append(int(item))
        except Exception:
            pass
    return out


def _flatten_to_ints(seq):
    """Flatten one level of nesting and coerce every item to ``int``.

    ``seq`` may be a scalar, a list/tuple of scalars, or a list/tuple whose
    items are themselves lists/tuples. Unconvertible items are skipped.
    """
    items = seq if isinstance(seq, (list, tuple)) else [seq]
    out = []
    for item in items:
        values = item if isinstance(item, (list, tuple)) else [item]
        for v in values:
            try:
                out.append(int(v))
            except Exception:
                pass
    return out


def _parse_result_entry(entry):
    """Normalise one evaluation result entry to ``(route, goods_sum)``.

    Supported formats (mirrors what the evaluators may return):
      * dict like ``{'route': [...], 'length': ..., 'goods': ...}``
      * tuple/list like ``(route_list, length, goods_sum)``
      * a plain list of node indices
      * anything else is best-effort flattened into a list of ints

    The reported length field, when present, is intentionally ignored — the
    log only prints routes and goods totals.
    """
    if isinstance(entry, dict):
        return entry.get('route', []), entry.get('goods', None)
    if isinstance(entry, (list, tuple)) and len(entry) > 0 and isinstance(entry[0], (list, tuple)):
        return entry[0], (entry[2] if len(entry) > 2 else None)
    if isinstance(entry, (list, tuple)) and all(isinstance(x, int) for x in entry):
        return list(entry), None
    return _flatten_to_ints(entry), None


def _write_available_windows(fh, g):
    """Write each node's ``available_window`` as int plus binary string.

    No-op when the graph has no ``available_window`` node data or it cannot be
    converted. ``fh`` is an open text file handle.
    """
    aw_tensor = g.ndata.get('available_window', None)
    if aw_tensor is None:
        return
    aw_ints = _as_int_list(aw_tensor)
    if not aw_ints:
        return
    # At least 10 bits so the binary form matches the original 0..1023 range.
    bitwidth = max(max(aw_ints).bit_length(), 1, 10)
    fh.write(f"All nodes available_window (int -> {bitwidth}-bit):\n")
    for idx, v in enumerate(aw_ints):
        fh.write(f"  node {idx}: {v} -> {v:0{bitwidth}b}\n")
    fh.write('\n')


def _write_route_log(log_file, results, g, goods_list, n_routes):
    """Write a human-readable log of up to ``n_routes`` routes to ``log_file``.

    The log starts with per-node metadata (goods vector, available windows),
    then, for each route, the cumulative set of visited nodes and the running
    goods (load) total at every step. Metadata sections are best-effort and
    never abort route writing.
    """
    n_avail = len(results) if isinstance(results, (list, tuple)) else 0
    to_take = min(n_routes, n_avail)

    with open(log_file, 'w', encoding='utf-8') as fh:
        fh.write(f"Routes log (first {to_take})\n")
        fh.write('\n')

        # Goods vector for all nodes (best effort, with a cruder fallback).
        try:
            fh.write('All nodes goods (int):\n')
            fh.write(','.join(str(x) for x in goods_list) + '\n\n')
        except Exception:
            try:
                fh.write('All nodes goods: ' + str(list(goods_list)) + '\n\n')
            except Exception:
                pass

        try:
            _write_available_windows(fh, g)
        except Exception:
            # Non-fatal: continue to write routes below.
            pass

        for r_i in range(to_take):
            route, goods_sum = _parse_result_entry(results[r_i])
            fh.write(f"Route {r_i + 1}:\n")
            if goods_sum is not None:
                fh.write(f"  (route reported goods_sum={goods_sum})\n")
            visited = []  # flattened node indices visited so far
            for step_idx, node in enumerate(route):
                visited.extend(_flatten_to_ints(node))
                # Running goods total over every node visited up to this step;
                # out-of-range indices are ignored.
                step_sum = sum(int(goods_list[v]) for v in visited
                               if 0 <= v < len(goods_list))
                visited_str = ','.join(str(v) for v in visited)
                fh.write(f"  step {step_idx + 1}: 访问节点{visited_str}，合计载重{step_sum}\n")
            fh.write('\n')
    logging.info(f"Saved route logs to {log_file}")


def _run_hierarchical(config, run_dir, actor, critic, pos_encoder):
    """Run hierarchical training (closed tour + geometry alignment), save metrics."""
    logging.info("进入层次训练模式 (closed tour + geometry alignment)")
    losses, closed_lengths = hierarchical_train(config, actor, critic, pos_encoder)
    plot_training_results(losses, closed_lengths, run_dir)
    # Store metrics explicitly.
    config.save_metrics(losses, closed_lengths)
    # NOTE(review): an old comment stated weights should also be saved to
    # `checkpoints` in this mode, but no save was ever implemented — confirm
    # whether hierarchical checkpoints are required.


def _run_pointer(config, run_dir, pos_encoder, critic_decoder):
    """Train a PointerNet on grouped TSP instances, evaluate, log and checkpoint."""
    logging.info("进入 Pointer-Net 组内 TSP 训练路径 (REINFORCE + 贪心基线 + 可选 2-opt)")
    pointer_model = PointerNet(
        input_dim=2,
        embed_dim=128,
        enc_hidden=128,
        dec_hidden=128,
        dropout=0.1,
    ).to(config.device)
    losses, lengths = pointer_train_on_groups(config, pointer_model, pos_encoder, critic_decoder)
    config.save_metrics(losses, lengths)

    # Plot training losses / average closed lengths for quick inspection;
    # plotting failures must not abort the run.
    try:
        plot_training_results(losses, lengths, run_dir)
        logging.info(f"Pointer training curves saved to {run_dir}")
    except Exception as e:
        logging.warning(f"Failed to plot pointer training results: {e}")

    # Evaluate the trained model and render the route visualisation.
    out_path, results, g = pointer_validate_and_plot(
        config, pointer_model, pos_encoder,
        candidates_k=config.pointer_eval_attempts,
        apply_two_opt=config.two_opt_enabled,
    )
    logging.info(f"Pointer 评估可视化保存到: {out_path}")

    # Gantt chart of the evaluated routes (helper enforces MSB mapping).
    draw_gantt_from_results(results, g, os.path.join(run_dir, 'gantt.png'), num_vehicles=3)

    # Plain-text log of the first `stop_num` routes plus per-node metadata.
    # Assumes g.ndata['goods'] exists — TODO confirm against the evaluator.
    goods_list = _as_int_list(g.ndata['goods'])
    _write_route_log(os.path.join(run_dir, 'result.txt'), results, g, goods_list,
                     int(getattr(config, 'stop_num', 3)))

    # Persist model weights.
    ckpt_dir = os.path.join(run_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(pointer_model.state_dict(), os.path.join(ckpt_dir, 'pointer_tsp.pth'))
    torch.save(pos_encoder.state_dict(), os.path.join(ckpt_dir, 'pos_encoder.pth'))


def _run_transformer(config, run_dir, pos_encoder, critic_decoder):
    """Train a TransformerTSP in pipeline mode, checkpoint, plot and evaluate."""
    logging.info("使用 TransformerTSP 在 pipeline 模式下训练（复用 Pointer 训练流程）")
    transformer_model = TransformerTSP(
        input_dim=2,
        d_model=128,
        nhead=8,
        num_encoder_layers=3,
        dim_feedforward=256,
        dropout=0.1,
        max_nodes=getattr(config, 'pos_max_nodes', 128),
    ).to(config.device)

    # Encoder mirroring the Transformer's encoder, used in place of the
    # position encoder during transformer training.
    bert = BertEncoder(
        input_dim=2,
        d_model=128,
        nhead=8,
        num_layers=3,
        max_nodes=getattr(config, 'pos_max_nodes', 128),
        output_dim=getattr(config, 'pos_output_dim', 64),
    ).to(config.device)

    # Dedicated Transformer trainer (isolated from pointer training).
    losses, path_lengths = transformer_train_on_groups(config, transformer_model, bert, critic_decoder)

    # Save checkpoints.
    ckpt_dir = os.path.join(run_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(transformer_model.state_dict(), os.path.join(ckpt_dir, 'transformer_tsp.pth'))
    torch.save(bert.state_dict(), os.path.join(ckpt_dir, 'bert_encoder.pth'))
    torch.save(pos_encoder.state_dict(), os.path.join(ckpt_dir, 'pos_encoder.pth'))
    logging.info(f"Transformer 模型已保存到 {ckpt_dir}")

    # Plot (non-fatal) and persist metrics.
    try:
        plot_training_results(losses, path_lengths, run_dir)
    except Exception:
        logging.warning("绘图失败: 无法保存训练曲线")
    config.save_metrics(losses, path_lengths)

    # Evaluate and visualise via the dedicated Transformer validation utility.
    try:
        out_path, results, g = transformer_validate_and_plot(
            config, transformer_model, bert,
            candidates_k=getattr(config, 'pointer_eval_attempts', 16),
            apply_two_opt=getattr(config, 'two_opt_enabled', False),
        )
        logging.info(f"Transformer 评估可视化保存到: {out_path}")
    except Exception as e:
        logging.warning(f"Transformer 评估/可视化失败: {e}")


def main():
    """Entry point: build the models and dispatch on ``config.training_mode``.

    Modes:
      * ``"hierarchical"`` — closed-tour hierarchical training.
      * ``"pointer"``      — Pointer-Net group-wise TSP training + evaluation.
      * anything else      — TransformerTSP pipeline training + evaluation.

    Configuration is edited directly in ``config.py`` (or here).
    """
    config = TrainingConfig()
    run_dir = config.prepare_run()
    logging.info(f"本次训练输出目录: {run_dir}")
    logging.info(f"使用设备: {config.device} (use_gpu={config.use_gpu}, cuda_available={torch.cuda.is_available()})")

    # Shared position encoder used for grouping (fusion) only.
    pos_encoder = PositionEncoder(
        input_dim=config.pos_input_dim,
        d_model=config.pos_d_model,
        nhead=config.pos_nhead,
        num_layers=config.pos_num_layers,
        max_nodes=config.pos_max_nodes,
    ).to(config.device)

    # Actor/critic pair for the hierarchical pipeline. Built unconditionally
    # (as before) so device allocation behaviour is unchanged.
    actor = ActorDecoder(config.actor_input_dim, config.actor_hidden_dim).to(config.device)
    critic = CriticEncoder(config.critic_input_dim, config.critic_hidden_dim).to(config.device)

    # Separate critic instance used as a baseline/decoder by the pointer and
    # transformer trainers.
    critic_decoder = CriticEncoder(config.critic_input_dim, config.critic_hidden_dim).to(config.device)

    if config.training_mode == "hierarchical":
        _run_hierarchical(config, run_dir, actor, critic, pos_encoder)
    elif config.training_mode == "pointer":
        _run_pointer(config, run_dir, pos_encoder, critic_decoder)
    else:
        _run_transformer(config, run_dir, pos_encoder, critic_decoder)




# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
