import time
from tqdm import tqdm
import logging
import argparse
import os
import torch
import torch.optim as optim
import numpy as np

# Removed unnecessary imports (e.g. nni) — only needed if NNI is used for
# hyper-parameter search.

from utils.utils import save_json_data, create_dir, load_pkl_data
from common.mbr import MBR
from common.spatial_func import SPoint, distance
from common.road_network import load_rn_shp
from torch.optim.lr_scheduler import StepLR

from utils.datasets import Dataset, collate_fn, LoadData
from models.model_utils import load_rn_dict, load_rid_freqs, get_rid_grid, get_poi_info, get_rn_info
from models.model_utils import get_online_info_dict, epoch_time, AttrDict, get_rid_rnfea_dict
from models.multi_train import init_weights, train
from models.model import Diff_RNTraj
from models.diff_module import diff_CSDI
from build_graph import load_graph_adj_mtx, load_graph_node_features

import warnings
import json
import pickle

warnings.filterwarnings("ignore", category=UserWarning)

if __name__ == '__main__':

    # ---------------- Command-line arguments ----------------
    # This script trains on the GeoLife dataset only; the parser exposes the
    # tunable hyper-parameters.  Each spec is (flag, type, default, help).
    parser = argparse.ArgumentParser(description='Train on GeoLife dataset only')
    for flag, arg_type, default, help_text in [
        ('--hid_dim', int, 512, 'hidden dimension'),
        ('--epochs', int, 30, 'epochs'),
        ('--batch_size', int, 256, 'batch size'),
        ('--lr', float, 1e-3, 'learning rate'),
        ('--diff_T', int, 500, 'diffusion step'),
        ('--beta_start', float, 0.0001, 'min beta'),
        ('--beta_end', float, 0.02, 'max beta'),
        ('--pre_trained_dim', int, 64, 'pre-trained dim of the road segment'),
        ('--rdcl', int, 10, 'stack layers on the denoise network'),
    ]:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)

    opts = parser.parse_args()

    # CUDA_VISIBLE_DEVICES is intentionally not set here; export it from the
    # shell if a specific GPU must be selected.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # ---------------- GeoLife-specific hyper-parameters ----------------
    args = AttrDict()
    args_dict = {
        'dataset': 'geolife',

        # MBR bounding box (Beijing area covered by GeoLife).
        'min_lat': 39.680,
        'min_lng': 116.080,
        'max_lat': 40.180,
        'max_lng': 116.770,
        'grid_size': 50,

        # Model parameters.
        # id_size: 38682 road segments + 1 — index 0 is presumably reserved
        # (matches the +1 shift applied to spatial_A_trans / SE later on).
        'hid_dim': opts.hid_dim,
        'id_size': 38682 + 1,
        'n_epochs': opts.epochs,
        'batch_size': opts.batch_size,
        'learning_rate': opts.lr,
        'tf_ratio': 0.5,
        'clip': 1,
        'log_step': 1,

        # Diffusion parameters.
        'diff_T': opts.diff_T,
        'beta_start': opts.beta_start,
        'beta_end': opts.beta_end,
        'pre_trained_dim': opts.pre_trained_dim,
        'rdcl': opts.rdcl
    }
    args.update(args_dict)

    print('Preparing data for GeoLife...')

    # ---------------- Path configuration ----------------
    path_dir = './data/'  # root directory holding the GeoLife data
    extra_info_dir = path_dir + "extra_file/"
    rn_dir = path_dir + "road_network/"
    UTG_file = path_dir + 'graph/graph_A.csv'
    pre_trained_road = path_dir + 'graph/road_embed.txt'

    # FIX: the original `if test_flag: ... else: ...` assigned the *same*
    # directory in both branches, so the conditional was dead code.
    # TODO(review): point this at a dedicated test-split directory once one exists.
    test_flag = True  # kept for compatibility with the original script
    train_trajs_dir = path_dir + 'Top30_beijing_convert/'

    model_save_path = './results/geolife/'
    create_dir(model_save_path)

    # Logging: append all records (DEBUG and above) to <model_save_path>log.txt.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename=model_save_path + 'log.txt',
        filemode='a'
    )

    # ---------------- Diffusion schedule ----------------
    # Variance schedule: linear in sqrt(beta) between beta_start and beta_end,
    # then squared back.
    sqrt_beta_grid = np.linspace(opts.beta_start ** 0.5, opts.beta_end ** 0.5, opts.diff_T)
    beta = sqrt_beta_grid ** 2
    alpha_np = 1 - beta
    alpha_bar_np = np.cumprod(alpha_np)

    # alpha / alpha_bar are consumed as float tensors on the training device;
    # beta stays a NumPy array (downstream code presumably never needs it on
    # the GPU — TODO confirm).
    alpha = torch.tensor(alpha_np).float().to(device)
    alpha_bar = torch.tensor(alpha_bar_np).float().to(device)

    diffusion_hyperparams = {
        'T': opts.diff_T,
        'alpha_bar': alpha_bar,
        'alpha': alpha,
        'beta': beta
    }

    # ---------------- Graph structure & embeddings ----------------
    # Use the "real usage" road network (segments actually traversed in the
    # data) instead of the full graph network.
    real_usage_dir = path_dir + "real_usage_network/"
    real_usage_A_file = real_usage_dir + "real_usage_adjacency_matrix.npz"
    real_usage_node_mapping_file = real_usage_dir + "node_mapping.json"

    # Load the adjacency matrix of the real-usage network.
    from scipy.sparse import csr_matrix
    real_usage_A = np.load(real_usage_A_file)
    # Rebuild the dense adjacency matrix from its stored CSR components
    # (data / indices / indptr / shape arrays inside the .npz archive).
    spatial_A = csr_matrix((real_usage_A['data'], real_usage_A['indices'], real_usage_A['indptr']), 
                          shape=real_usage_A['shape']).toarray()
    # Shift by one row/column so index 0 is reserved (matches the +1 index
    # shift used when filling SE below); the 1e-10 floor keeps all entries
    # strictly positive — presumably to avoid log(0)/div-by-zero downstream.
    spatial_A_trans = np.zeros((spatial_A.shape[0]+1, spatial_A.shape[1]+1)) + 1e-10
    spatial_A_trans[1:, 1:] = spatial_A

    # Load the node mapping of the real-usage network.
    with open(real_usage_node_mapping_file, 'r') as f:
        node_mapping = json.load(f)

    # Build node embeddings from the pre-trained road embeddings.
    # Word2vec-style text format: header line "<num_nodes> <dims>", then one
    # line per node: "<index> <v_0> ... <v_{dims-1}>".
    with open(pre_trained_road, 'r') as f:
        lines = f.readlines()
        temp = lines[0].split(' ')
        N, dims = int(temp[0]) + 1, int(temp[1])  # +1 row: row 0 stays all-zero (reserved id)
        SE = np.zeros((N, dims), dtype=np.float32)
        for line in lines[1:]:
            temp = line.split(' ')
            index = int(temp[0])
            SE[index + 1] = [float(x) for x in temp[1:]]  # shift by 1, consistent with spatial_A_trans

    SE = torch.from_numpy(SE).to(device)  # move embeddings to the training device

    # Load the road network (directed graph from shapefile) and the id
    # dictionaries that map between raw and compacted road ids.
    rn = load_rn_shp(rn_dir, is_directed=True)
    raw_rn_dict = load_rn_dict(extra_info_dir, file_name='raw_rn_dict.json')
    new2raw_rid_dict = load_rid_freqs(extra_info_dir, file_name='new2raw_rid.json')
    raw2new_rid_dict = load_rid_freqs(extra_info_dir, file_name='raw2new_rid.json')
    rn_dict = load_rn_dict(extra_info_dir, file_name='rn_dict.json')

    # Partition the bounding box into grid_size cells and assign road ids to
    # cells; the resulting grid extents are stored back into the config.
    mbr = MBR(args.min_lat, args.min_lng, args.max_lat, args.max_lng)
    grid_rn_dict, max_xid, max_yid = get_rid_grid(mbr, args.grid_size, rn_dict)
    args.max_xid = max_xid
    args.max_yid = max_yid

    print(args)
    logging.info(args)
    # Persist the final config to a plain text file ('w' truncates any
    # previous run's copy).
    with open(model_save_path + 'logging.txt', 'w') as f:
        f.write(str(args) + '\n')

    # ---------------- Load trajectory data ----------------
    # Pickled training sequences: road-segment (edge) id sequences and the
    # matching rate sequences.  NOTE(review): pickle.load is only safe because
    # these files come from the project's own preprocessing pipeline.
    with open(train_trajs_dir + 'eid_seqs.bin', 'rb') as f:
        all_src_eid_seqs = pickle.load(f)
    with open(train_trajs_dir + 'rate_seqs.bin', 'rb') as f:
        all_src_rate_seqs = pickle.load(f)

    # ---------------- Model initialisation ----------------
    diff_model = diff_CSDI(
        channels=args.hid_dim,          # channel width of the denoise network
        inputdim=args.hid_dim,          # input feature dimension
        num_steps=opts.diff_T,          # number of diffusion steps
        embedding_dim=args.hid_dim,     # diffusion-step embedding dimension
        pre_dim=args.pre_trained_dim,   # pre-trained road-embedding dimension
        rdcl=opts.rdcl                  # stacked layers on the denoise network
    )

    model = Diff_RNTraj(diff_model, diffusion_hyperparams).to(device)
    model.apply(init_weights)

    print('Model:', model)
    logging.info('Model: ' + str(model))
    with open(model_save_path + 'logging.txt', 'a+') as f:
        f.write('Model: ' + str(model) + '\n')

    # ---------------- Optimizer & schedule ----------------
    # Two trainable log-variance scalars — presumably used inside train() for
    # uncertainty-weighted multi-task loss balancing (TODO confirm).
    log_vars = [torch.zeros((1,), requires_grad=True, device=device) for _ in range(2)]
    optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)
    scheduler = StepLR(optimizer, step_size=3, gamma=0.5)  # halve the LR every 3 epochs

    # ---------------- Training loop ----------------
    best_loss = float('inf')
    dict_train_loss = {'train_ttl_loss': [], 'train_const_loss': [], 'train_diff_loss': [], 'train_x0_loss': []}

    for epoch in tqdm(range(args.n_epochs)):
        t0 = time.time()

        # One full pass over the training trajectories.
        new_log_vars, train_loss, train_const_loss, train_diff_loss, train_x0_loss = \
            train(model, spatial_A_trans, SE, all_src_eid_seqs, all_src_rate_seqs,
                  optimizer, log_vars, args, diffusion_hyperparams)

        scheduler.step()

        # Record this epoch's losses in the history dict.
        dict_train_loss['train_ttl_loss'].append(train_loss)
        dict_train_loss['train_const_loss'].append(train_const_loss)
        dict_train_loss['train_diff_loss'].append(train_diff_loss)
        dict_train_loss['train_x0_loss'].append(train_x0_loss)

        epoch_mins, epoch_secs = epoch_time(t0, time.time())

        # Checkpoint whenever the total training loss improves.
        if train_loss < best_loss:
            best_loss = train_loss
            torch.save(model.state_dict(), model_save_path + 'val-best-model.pt')
            print("Saved best model.")

        logging.info(f'Epoch: {epoch+1} Time: {epoch_mins}m {epoch_secs}s')
        logging.info(f'\tTrain Loss: {train_loss:.4f}, '
                     f'Const: {train_const_loss:.4f}, '
                     f'Diff: {train_diff_loss:.4f}, '
                     f'X0: {train_x0_loss:.4f}')

        # Always persist the latest weights and the loss history.
        torch.save(model.state_dict(), model_save_path + 'train-mid-model.pt')
        save_json_data(dict_train_loss, model_save_path, "train_loss.json")