import itertools
from collections import defaultdict
from pathlib import Path
import argparse
import os
import json
import torch
import numpy as np

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

from ase.neighborlist import natural_cutoffs, NeighborList
from ase.io import read, Trajectory

# optional. nglview for visualization
# import nglview as nv

"""
functions for loading simulated trajectories and computing observables.
"""

def get_thermo(filename):
    """
    Read a whitespace-separated thermo log with columns
    time, Etot, Epot, Ekin, Temp. The first line is always skipped
    as a header.

    Any line that fails to parse (e.g. a repeated header written after a
    simulation restart) RESETS the accumulators, so only the data after the
    last unparsable line is kept -- this mirrors the original
    restart-handling behavior.

    Returns:
        dict with keys 'time', 'Et', 'Ep', 'Ek', 'T', each a list of floats.
    """
    sim_time, Et, Ep, Ek, T = [], [], [], [], []
    with open(filename, 'r') as f:
        # skip the header line, then parse each data row
        for line in f.read().splitlines()[1:]:
            try:
                # ValueError on non-numeric tokens or wrong column count;
                # the previous bare `except:` also swallowed KeyboardInterrupt etc.
                t, Etot, Epot, Ekin, Temp = (float(x) for x in line.split() if x)
            except ValueError:
                # unparsable line (e.g. header after a restart):
                # drop everything accumulated before it
                sim_time, Et, Ep, Ek, T = [], [], [], [], []
                continue
            sim_time.append(t)
            Et.append(Etot)
            Ep.append(Epot)
            Ek.append(Ekin)
            T.append(Temp)
    return {
        'time': sim_time,
        'Et': Et,
        'Ep': Ep,
        'Ek': Ek,
        'T': T
    }

def get_test_metrics(md_dir):
    """
    Read test metrics (force/energy MAE, parameter count, running time)
    from ``md_dir / 'test_metric.json'``, normalizing the different key
    names used by different training codebases to 'fmae'/'emae'.
    """
    with open(md_dir / 'test_metric.json', 'r') as f:
        test_metric = json.load(f)

    run_metrics = {}

    # first matching alias wins; 'forces_mae'/'energy_mae' nest the value
    # under a 'metric' sub-key
    for out_key, aliases in (('fmae', ('mae_f', 'f_mae', 'forces_mae')),
                             ('emae', ('mae_e', 'e_mae', 'energy_mae'))):
        for key in aliases:
            if key in test_metric:
                value = test_metric[key]
                if key in ('forces_mae', 'energy_mae'):
                    value = value['metric']
                run_metrics[out_key] = value
                break

    if 'num_params' in test_metric:
        run_metrics['n_params'] = test_metric['num_params']
    if 'running_time' in test_metric:
        run_metrics['running_time'] = test_metric['running_time']

    return run_metrics

def mae(x, y, factor):
    """Mean absolute error between x and y, scaled by factor."""
    return factor * np.mean(np.abs(x - y))

def distance_pbc(x0, x1, lattices):
    """
    Minimum-image Euclidean distance between positions x0 and x1 under
    periodic boundary conditions with per-batch cell lengths ``lattices``.
    """
    cell = lattices.view(-1, 1, 3)
    diff = (x0 - x1).abs()
    # fold components larger than half the box back into the cell
    diff = torch.where(diff > 0.5 * cell, diff - cell, diff)
    return diff.pow(2).sum(dim=-1).sqrt()

def get_diffusivity_traj(pos_seq, dilation=1):
    """
    Diffusivity estimate from the mean squared displacement (Einstein
    relation, D = MSD / (6 t)), using frame 0 as the time origin.

    Input: B x N x T x 3
    Output: B x (T-1)
    dilation: time units per frame step.
    """
    n_traj, n_steps = pos_seq.shape[0], pos_seq.shape[2]
    # remove the center of mass (mean over atoms) at every frame
    centered = pos_seq - pos_seq.mean(1, keepdims=True)
    # squared displacement from frame 0, summed over xyz, averaged over atoms
    displacement = centered[:, :, 1:] - centered[:, :, :1]
    msd = displacement.pow(2).sum(dim=-1).mean(dim=1)
    lag_times = torch.arange(1, n_steps) * dilation
    return (msd / lag_times / 6).view(n_traj, n_steps - 1)

def get_smoothed_diff(xyz):
    """
    Diffusivity averaged over all possible time origins.

    For each start frame i, computes the diffusivity trajectory of xyz[i:]
    (via get_diffusivity_traj) and accumulates it per lag index, then
    divides each lag by the number of origins that contributed to it.

    Input: xyz of shape T x N x 3. Output: tensor of shape (T-1,).
    """
    seq_len = xyz.shape[0] - 1
    diff = torch.zeros(seq_len)
    for start in range(seq_len):
        # window shaped 1 x N x (T-start) x 3, as get_diffusivity_traj expects
        window = xyz[start:].transpose(0, 1).unsqueeze(0)
        diff[:seq_len - start] += get_diffusivity_traj(window).flatten()
    # Lag index j accumulates exactly (seq_len - j) contributions.
    # Bug fix: the previous divisor torch.flip(torch.arange(seq_len)) was
    # [seq_len-1, ..., 0] -- off by one, and it divided the final lag by
    # zero, yielding inf. Divide by the true origin counts instead.
    counts = torch.arange(seq_len, 0, -1)
    return diff / counts

# RDF histogram settings: radial range (~0, xlim] Angstrom split into nbins bins.
xlim = 6
nbins = 1000
bins = np.linspace(1e-6, xlim, nbins+1)
# RDF-MAE above which a trajectory window is considered collapsed (unstable).
stability_threshold = 3.0
# NOTE(review): xlim/bins/stability_threshold are re-assigned inside the
# __main__ block below, shadowing these values; diffusivity_cutoff appears
# unused in this file -- confirm before removing.
diffusivity_cutoff = 3000

def distance_pbc_select(x, lattices, indices0, indices1):
    """
    All pairwise minimum-image distances between two atom selections.

    x: B x N x 3 positions; indices0/indices1: boolean masks over atoms.
    Returns a B x (n0 * n1) tensor whose flat pair index k = j * n0 + i
    corresponds to (selection0 atom i, selection1 atom j).
    """
    group0 = x[:, indices0]  # B x n0 x 3
    group1 = x[:, indices1]  # B x n1 x 3
    # broadcast every (group1, group0) pair: B x n1 x n0 x 3
    delta = torch.abs(group1.unsqueeze(2) - group0.unsqueeze(1))
    # minimum-image convention for the cubic cell
    delta = torch.where(delta > 0.5 * lattices, delta - lattices, delta)
    return delta.pow(2).sum(dim=-1).sqrt().flatten(1)

def get_water_rdfs(data_seq, ptypes, lattices, bins, device='cpu'):
    """
    Compute atom-type-conditioned water RDF curves (O-O, H-H, H-O).

    data_seq: frames x atoms x 3 positions; ptypes: atomic numbers
    (1 = H, 8 = O); lattices: cubic cell lengths; bins: histogram bin
    edges. Returns a dict {'OO': ..., 'HH': ..., 'HO': ...} of numpy
    arrays, each normalized by the ideal-gas shell volume.
    """
    data_seq = data_seq.to(device).float()
    lattices = lattices.to(device).float()

    selections = {
        'H': ptypes == 1,
        'O': ptypes == 8
    }

    # wrap coordinates back into the simulation cell
    data_seq = ((data_seq / lattices) % 1) * lattices

    # shell volumes and cell volume are shared by all three pair types
    shell_vol = 4 / 3 * np.pi * (bins[1:] ** 3 - bins[:-1] ** 3)
    cell_vol = torch.prod(lattices).cpu().numpy()

    all_rdfs = {}
    for type1, type2 in (('O', 'O'), ('H', 'H'), ('H', 'O')):
        sel0 = selections[type1].to(device)
        sel1 = selections[type2].to(device)

        # pairwise minimum-image distances between the two selections
        # (inlined equivalent of distance_pbc_select)
        x0 = data_seq[:, sel0]
        x1 = data_seq[:, sel1]
        n0, n1 = x0.shape[1], x1.shape[1]
        delta = torch.abs(x0.repeat([1, n1, 1]) - x1.repeat_interleave(n0, dim=1))
        delta = torch.where(delta > 0.5 * lattices, delta - lattices, delta)
        dists = torch.sqrt((delta ** 2).sum(axis=-1)).flatten().cpu().numpy()

        # density uses the pair count BEFORE self-distances are dropped,
        # matching the original normalization
        n_total = dists.shape[0]
        dists = dists[dists != 0]
        hist, _ = np.histogram(dists, bins)
        density = n_total / cell_vol
        all_rdfs[type1 + type2] = hist / (density * shell_vol)

    return all_rdfs

def load_run(md_dir, atom_types, xlim, bins, stability_threshold, gt_rdfs, gt_diff,rdf_check_interval,lattices):
    """
    Load one MD run directory and compute stability / observable metrics.

    Scans the trajectory with a sliding window of ``rdf_check_interval``
    frames; the run is marked collapsed at the first window whose RDF MAE
    against ``gt_rdfs`` exceeds ``stability_threshold``. Diffusivity and
    full-trajectory RDF errors are then computed on the pre-collapse part
    only.

    Returns a dict with keys: name, thermo, traj, collapse_pt, diffusivity,
    diff_error, end_diff, rdf, rdf_error, plus rdf_mae and any metrics read
    from test_metric.json when available.
    """
    if not isinstance(md_dir, Path):
        md_dir = Path(md_dir)

    # run name comes from the directory layout .../<model_name>/<run_dir>;
    # the run_dir's last character is taken as the seed digit
    model_name = md_dir.parts[-2]
    seed = md_dir.parts[-1][-1]
    run = {'name': (model_name + f'_seed_{seed}'),}

    # load thermo log
    run['thermo'] = get_thermo(md_dir / 'thermo.log')
    md_time = np.array(run['thermo']['time'])
    T = np.array(run['thermo']['T']) # temperature series; its length bounds the slices below

    collapse_pt = len(T)

    # process trajectory
    traj = [x.positions for x in Trajectory(md_dir / 'atoms.traj')]
    run['traj'] = torch.from_numpy(np.stack(traj))[0:collapse_pt] # keep only the frames covered by the thermo log
    # run['traj'] = torch.unique(run['traj'], dim=0) # remove repeated frames from restarting.


    # slide a window over the trajectory; stop at the first unstable window.
    # NOTE(review): this loop never runs when len(T) <= rdf_check_interval + 1,
    # leaving run['rdf_mae'] unset -- confirm callers handle that case.
    for i in (range(1, len(T)-rdf_check_interval)):
        timerange = torch.arange(i, i + rdf_check_interval)
        current_rdf = get_water_rdfs(run['traj'][timerange], atom_types, lattices, bins)
        rdf_mae_oo = mae(current_rdf['OO'], gt_rdfs['OO'], xlim)
        rdf_mae_ho = mae(current_rdf['HO'], gt_rdfs['HO'], xlim)
        rdf_mae_hh = mae(current_rdf['HH'], gt_rdfs['HH'], xlim)
        # print(rdf_mae_oo,rdf_mae_ho,rdf_mae_hh)
        run['rdf_mae']=[rdf_mae_oo, rdf_mae_ho, rdf_mae_hh]  # MAEs of the last window checked
        if max([rdf_mae_oo, rdf_mae_ho, rdf_mae_hh]) > stability_threshold:
            collapse_pt = i
            break

    run['collapse_pt'] = collapse_pt

    # at least 100 ps for computing diffusivity.
    if collapse_pt >= 1000:
        # oxygen atoms only, every 10th frame (presumably a 1 ps stride at
        # 0.1 ps/frame recording -- see the gt RDF comment in __main__)
        run['diffusivity'] = get_smoothed_diff(
            run['traj'][:collapse_pt:10, atom_types == 8])[:100]
        run['diff_error'] = float((run['diffusivity'][-1] - gt_diff[-1]).abs())
        run['end_diff'] = float(run['diffusivity'][-1])
    else:
        run['diffusivity'] = None
        run['diff_error'] = np.inf
        run['end_diff'] = np.inf

    # at least 1 ps for computing RDFs.
    if collapse_pt >= 10:
        run['rdf'] = get_water_rdfs(run['traj'][:collapse_pt], atom_types, lattices, bins)
        run['rdf_error'] = [mae(run['rdf'][k], gt_rdfs[k], xlim) for k in ['OO', 'HH', 'HO']]
    else:
        run['rdf'] = None
        run['rdf_error'] = [np.inf] * 3

    # load test metrics
    if (md_dir / 'test_metric.json').exists():
        test_metrics = get_test_metrics(md_dir)
        run.update(test_metrics)

    return run


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--molecule", required=True, type=str)
    parser.add_argument("--model_dir", type=str)
    parser.add_argument("--identifier", type=str)
    parser.add_argument("--save_freq", type=int)
    parser.add_argument("--steps", type=int)

    args, override_args = parser.parse_known_args()

    # eval parameters (these shadow the module-level values defined above)
    stability_threshold = 3.0
    rdf_check_interval = 10 # 1 ps
    xlim = 6
    n_bins = 500
    bins = np.linspace(1e-6, xlim, n_bins + 1) # for computing RDF

    # get ground truth data
    # DATAPATH = 'DATAPATH/water.npy'
    # gt_data = np.load(DATAPATH, allow_pickle=True).item()

    gt_data = np.load('dataset/water.npz', allow_pickle=True)
    atom_types = torch.tensor(gt_data['atom_types'])
    # lattices = torch.tensor(gt_data['lengths'][0]).float()
    # NOTE(review): cubic cell lengths are hard-coded instead of read from
    # the dataset -- confirm they match gt_data['lengths'].
    lattices = torch.tensor([9.31848768,9.31848768,9.31848768]).float()
    print(lattices)
    # gt_traj = torch.tensor(gt_data['unwrapped_coords'])
    gt_traj = torch.tensor(gt_data['wrapped_coords'])

    gt_diff = get_smoothed_diff(gt_traj[0::100, atom_types==8])[:100] # track diffusivity of oxygen atoms, unit is A^2/ps
    # print(gt_traj[::10].shape)
    gt_rdfs = get_water_rdfs(gt_traj[::10], atom_types, lattices, bins) # match the recording frequency of 0.1 ps

    # ['type', 'code_version', 'name', 'theory', 'R', 'z', 'F', 'F_min', 'F_max', 'F_mean', 'F_var', 'r_unit', 'e_unit', 'E', 'E_min', 'E_max', 'E_mean', 'E_var', 'md5', 'perms']

    stability_threshold = 3.0
    # load run and plot RDFs
    # NOTE(review): the CLI arguments parsed above are never used here;
    # md_dir is hard-coded -- presumably a leftover from a batch script.
    md_dir = Path('MODELPATH/water_1k_schnet/md_500ps_0_init_0')
    # md_dir = Path('MODELPATH/adv1/water_1k_force/water_1k_forcenet50/md_500ps_0_init_0')
    run = load_run(md_dir, atom_types, xlim, bins, stability_threshold, gt_rdfs, gt_diff,rdf_check_interval,lattices)

    # metrics
    # NOTE(review): run['fmae'] is missing when no test_metric.json exists,
    # and run['rdf_mae'] is unset for trajectories shorter than the RDF
    # window -- either case raises KeyError below.
    force_mae = run['fmae'] * 1000
    collapse_ps = (run['collapse_pt']-1) / 10
    rdf_oo_mae = run['rdf_error'][0]
    rdf_hh_mae = run['rdf_error'][1]
    rdf_ho_mae = run['rdf_error'][2]
    diff_mae = run['diff_error'] * 10 # A^2/ps -> 10^-9 m^2/s 
    print(f'force mae: {force_mae:.1f} meV/A \nstability: {collapse_ps:.1f} ps \nRDF (O,O) mae: {rdf_oo_mae:.2f}' + 
        f'\nRDF (H,H) mae: {rdf_hh_mae:.2f} \nRDF (H,O) mae: {rdf_ho_mae:.2f} \nDiffusivity mae: {diff_mae:.2f} x 10^-9 m^2/s ')
    print(run['rdf_mae'])
    # plt.subplots_adjust()
    # plt.rc('xtick', labelsize=16)
    # plt.rc('ytick', labelsize=16) 
    # plt.rc('legend', fontsize=24)
    # plt.rc('figure', titlesize=24)
    # plt.rc('axes', titlesize=24)
    # plt.rc('axes', labelsize=24)
    # fig, axs = plt.subplots(1, 3)
    # fig.set_size_inches(18, 4) 
    # fig.tight_layout(h_pad=3, w_pad=1)
    
    # for i, elem in enumerate(['OO', 'HH', 'HO']):
    #     axs[i].plot(bins[:-1], gt_rdfs[elem], label='Reference', linewidth=3, linestyle='--')
    #     axs[i].plot(bins[:-1], run['rdf'][elem], label='Prediction', linewidth=3, linestyle='--')
    #     axs[i].set(title=f'RDF {elem}', xlabel='r')
    #     axs[i].legend()
    # axs[0].set_ylabel('RDF(r)')