 # -*- coding: utf-8 -*-

from __future__ import print_function
import sys
sys.path.append('/opt/alps/lib')
sys.path.append('/share/opt/alps/lib')
import pyalps
import matplotlib.pyplot as plt
import pyalps.plot
import numpy as np
from numpy import array,shape,arange
import time
#import scipy.io
import glob, os, shutil
import multiprocessing

#----------------------1 Run the Task------------------------
run_with_GUI = 0    # 0: headless (log to file, pdf backend); 1: interactive
numcores = 15       # number of parallel worker processes
run_the_task = 1    # 1: run the MPS simulations; 0: skip straight to analysis
Paras = np.arange(50, 800, 50)  # swept parameter values (bond dimension MAXSTATES)
#Paras = np.arange(32,130,2)
#Paras = [128]
(lattice_library,L)=('lattice_196.xml',196)

# Site-index pairs (0-based) whose SzSz correlators will be measured.
# NOTE: use // for explicit integer division so this also runs under
# Python 3, where L/2 would yield a float and break range().
# Odd separations, symmetric about the chain centre: [97,98],[96,99],...,[0,195]
oddl_pairs = [[L//2 - 1 - t, L//2 + t] for t in range(L//2)]

# Even separations anchored at the left edge: [0,194],[1,193],...,[96,98]
evenl_left_pairs = [[t, L - 2 - t] for t in range(L//2 - 1)]

# Even separations anchored at the right edge: [1,195],[2,194],...,[97,99]
evenl_right_pairs = [[1 + t, L - 1 - t] for t in range(L//2 - 1)]

# Serialize all pairs into the '(i, j),(k, l),...' format expected by the
# MEASURE_LOCAL_AT parameter below.
measured_pairs = ','.join(
    str(tuple(p)) for p in (oddl_pairs + evenl_left_pairs + evenl_right_pairs))
#    measured_pairs = [str((L//2-1,L//2)),str((L//2,L//2+1))]
#    measured_pairs = [str((i,i+1)) for i in range(L-1)]
#    measured_pairs = [str((L//2-1-halfl,L//2-1+halfl)) for halfl in np.arange(L//2-1)+1]

task_name = 'mps_spin_one'  # global variable, can be accessed by function Runmps()
sweeps = 15
#maxstates = 150
Sz_total = 0

def Runmps(para):
    """Build an ALPS parameter set, run mps_optim for one parameter value,
    and log the elapsed wall time.

    para -- the swept parameter; here it is used as the MPS bond
            dimension MAXSTATES.

    Relies on module-level globals: task_name, lattice_library, L,
    Sz_total, sweeps, measured_pairs, run_with_GUI.
    """
    maxstates = para
    ## information hidden, i.e. (maxstates, sweeps) = para, or sweeps = para
    parm = {}
    parm['LATTICE_LIBRARY'] = lattice_library
    parm['LATTICE'] = 'open chain lattice with special edges'
    parm['L'] = L
    parm['MODEL'] = 'spin'
    parm['local_S0'] = 0.5
    parm['local_S1'] = 1
    parm['COMPLEX'] = 1
    parm['CONSERVED_QUANTUMNUMBERS'] = 'Sz'
    parm['Sz_total'] = Sz_total
    parm['J'] = 1
    parm['MAXSTATES'] = maxstates
    parm['SWEEPS'] = sweeps
    parm['NUMBER_EIGENVALUES'] = 1
#    parm['MEASURE[EnergyVariance]'] = 1
    parm['MEASURE_LOCAL_AT[custom_SzSz]'] = 'Sz:Sz|'+measured_pairs
#    parm['MEASURE_LOCAL_AT[custom_SplusSminus]'] = 'Splus:Sminus|'+measured_pairs
    parm = [parm]

    input_file = pyalps.writeInputFiles(task_name + '_para=' + str(para), parm)
    time1 = time.time()
    pyalps.runApplication('mps_optim', input_file, writexml=True)
    time2 = time.time()
    # Build the message once; route it to the log file in headless mode.
    message = ('para = %s task finished with time %.2f minutes.' %
               (para, (time2 - time1) / 60.0))
    if run_with_GUI == 0:
        # 'with' guarantees the handle is closed even if the write fails.
        with open('1Run_print.txt', 'a') as doc1:
            print(message, file=doc1)
    else:
        print(message)

def delete_old_outfiles():  # delete the files produced by the previous run
    """Remove the *.in.* / *.out.* files (and checkpoint directories)
    left over from a previous run of this task.

    Relies on the module-level global task_name.
    """
    old_in_files = glob.glob('*%s*.in.*' % task_name)
    old_out_files = glob.glob('*%s*.out.*' % task_name)
    for i in old_in_files + old_out_files:
        try:
            os.remove(i)  # works for plain files only
        except OSError:
            try:
                shutil.rmtree(i)  # fall back for directories (e.g. .chkp)
            except OSError:  # narrowed from bare except: don't hide unrelated bugs
                print('Something wrong when deleting files.')

## Run this function to delete the large MPS wave-function .chkp checkpoint
## directories when the calculation does not need to be resumed.
def delete_MPS_wavefunction():
    """Delete this task's *.chkp checkpoint directories to free disk space;
    they are only needed to restart or extend a calculation.

    Relies on the module-level global task_name.
    """
    chkp_files = glob.glob('*%s*out*.chkp' % task_name)
    for i in chkp_files:
        try:
            shutil.rmtree(i)  # .chkp is a directory, hence rmtree not remove
        except OSError:  # narrowed from bare except: don't hide unrelated bugs
            print('Maybe no such file to delete.')
    print('check point files deleted.')

if run_the_task == 1:

    delete_old_outfiles()  # decide whether to remove files from the previous run

    # Fan the parameter sweep out over a process pool; each worker runs one
    # mps_optim job via Runmps(para).
    pool = multiprocessing.Pool(processes=numcores)
    ##    Paras = [i+1 for i in range(20)]
    print('report: This task has %d parallel task/tasks.' % (len(Paras)))
    print('task %s is Running...' % (task_name))
    pool.map(Runmps, Paras, 1)  # chunksize 1: dispatch one parameter at a time
    pool.close()
    pool.join()
    if run_with_GUI == 0:
        # Headless run: append the completion notice to the log file;
        # 'with' guarantees the handle is closed.
        with open('1Run_print.txt', 'a') as doc1:
            print('Finished with all tasks.', file=doc1)
    else:
        print('Finished with all tasks.')
    delete_MPS_wavefunction()  # drop the .chkp checkpoint directories
else:
    print('Task did not run, continue with data analysis.')
## If run_the_task is 0, only the initial setup section above is executed.

#----------------------2 Result analysis---------------------------------------
result_files = pyalps.getResultFiles(prefix=task_name)
# Fail fast with an explicit exception if no result file was produced,
# instead of silently continuing with empty data.  (Replaces `assert`,
# which is stripped when Python runs with -O; also fixes the typo in
# the message.)
if not result_files:
    raise RuntimeError('Report: Error, no result file has been calculated out. The calculation has definitely failed.')
if run_with_GUI == 0:
    plt.switch_backend('pdf')
## Use the non-interactive pdf backend so plotting works without a display.

## special lattice support version
## method is 'full' (default), 'half_hermitian' or 'half_symmetric' for the
## different correlation-matrix measurement schemes; use 'custom' for
## user-defined pair measurements (unmeasured entries are returned as NaN)
def get_correlation_matrices_v4(propstring, correlator_string, method='full',special_lattice=0):
    """Load a measured correlator from every result file and assemble it
    into one L x L complex matrix per parameter value.

    propstring        -- property name used to sort datasets (e.g. 'MAXSTATES')
    correlator_string -- observable name to load (e.g. 'custom_SzSz')
    method            -- 'full', 'half_hermitian', 'half_symmetric' or 'custom'
    special_lattice   -- 1 if the lattice file indexes sites from 1 (shifted
                         to 0-based here)

    Returns (paras, correlation_matrices) as numpy arrays, sorted by
    propstring.  Relies on the module-level global result_files.
    """
    import numpy as np
    def get_correlation_matrix_of_dataset(dataset, method):
        L = int(dataset.props['L'])
        correlation_order = dataset.x
        correlation_data = dataset.y
        ## Unlike dmrg, mps_optim returns the correlator entries in arbitrary
        ## order and does not measure the main-diagonal elements.
        correlation_matrix = np.zeros((L, L))
        correlation_matrix = correlation_matrix.astype(complex)  ## to support complex
        correlation_matrix[:] = complex('NaN')
        if method == 'custom':
            if len(np.shape(correlation_order))==1:
                # Only the column indices are present; recover the (fixed) row
                # index from the MEASURE_LOCAL_AT input string, which looks
                # like 'Sz:Sz|(97, 98),(96, 99),...'.
                measure_input = dataset.props['MEASURE_LOCAL_AT['+dataset.props['observable']+']']
                measure_pairs = measure_input.split('|')[1]
                measure_pairs_split = np.array([i for i in measure_pairs])
                b0 = np.where(measure_pairs_split=='(')
                b1 = np.where(measure_pairs_split==',')
                idx1 = b0[0][0]
                idx2 = b1[0][0]
                rowstring = measure_pairs[idx1+1:idx2]
                row = int(rowstring)
                correlation_order = np.array([[row,i] for i in correlation_order])
        n = 0
        if special_lattice==0:
            for order in correlation_order:
                correlation_matrix[order[0], order[1]] = correlation_data[0][n]
                n += 1
        elif special_lattice==1:
            # special lattice files index sites from 1 -> shift to 0-based
            for order in correlation_order:
                correlation_matrix[order[0]-1, order[1]-1] = correlation_data[0][n]
                n += 1
        for i in range(L):
            correlation_matrix[i, i] = complex('NaN')
        calcued_elements = np.shape(correlation_order)[0]
        if method == 'full':
            assert calcued_elements == L**2 - L, 'Report: Error, you put half measured correlation matrix into full one program'
        elif method == 'half_hermitian':
            assert calcued_elements != L**2 - L, 'Report: Error, you put full measured correlation matrix into half one program'
            correlation_matrix = correlation_matrix + correlation_matrix.conj().T
        elif method == 'half_symmetric':
            assert calcued_elements != L**2 - L, 'Report: Error, you put full measured correlation matrix into half one program'
            correlation_matrix = correlation_matrix + correlation_matrix.T
        elif method =='custom':
            return correlation_matrix
        else:
            # Call syntax: the old `raise Exception, "..."` statement form is
            # Python-2-only and a SyntaxError under Python 3.
            raise Exception("Invalid method for my correlation matrix !")
        return correlation_matrix
    eigen_measure_obs = pyalps.loadEigenstateMeasurements(
        result_files, what=[correlator_string])
    ku = [i[0] for i in eigen_measure_obs]
    def by_props(dataset):  ## sort the DataSets by the chosen property value
        return dataset.props[propstring]
    ## implementing collectXY by hand keeps this flexible
    obs_sorted = sorted(ku, key=by_props)
    paras = [i.props[propstring] for i in obs_sorted]
    correlation_matrices = [get_correlation_matrix_of_dataset(i, method) for i in obs_sorted]
    paras = np.array(paras)
    correlation_matrices = np.array(correlation_matrices)
    return (paras, correlation_matrices)


# Extract the measured SzSz correlators: one L x L matrix per swept bond
# dimension (MAXSTATES).  special_lattice=1 because the lattice file uses
# 1-based site indices.
(Ds, SzSz_correlation_matrices) = get_correlation_matrices_v4('MAXSTATES',
 'custom_SzSz', 'custom',special_lattice=1)

def get_pairs_length(pairs):
    """Return the separation j - i for each index pair [i, j] in *pairs*."""
    return [j - i for i, j in pairs]

# Site separations for each family of measured pairs (odd separations
# about the centre; even separations from the left/right edges).
oddl_lengths = get_pairs_length(oddl_pairs)
evenl_left_lengths = get_pairs_length(evenl_left_pairs)
evenl_right_lengths = get_pairs_length(evenl_right_pairs)

def get_matrix_elements(pairs,matrix):
    """Return a numpy array of matrix[i][j] for each index pair [i, j]."""
    return array([matrix[i][j] for i, j in pairs])

# For each bond dimension D: gather C(l) = <Sz_i Sz_j> versus pair
# separation l, fit the logarithm of the (sign-corrected) correlator
# linearly over a fixed window, and read the correlation length off the
# slope (slope = -1/xi).
correlation_lengths = []
for SzSz_correlation_matrix in SzSz_correlation_matrices:
    #SzSz_correlation_matrix = SzSz_correlation_matrices[0]
    oddl_elements = get_matrix_elements(oddl_pairs,SzSz_correlation_matrix)
    evenl_left_elements = get_matrix_elements(evenl_left_pairs,SzSz_correlation_matrix)
    evenl_right_elements = get_matrix_elements(evenl_right_pairs,SzSz_correlation_matrix)

    # Average the left- and right-anchored even-separation measurements.
    # NOTE(review): if the two length lists ever differ, evenl_elements /
    # evenl_lengths stay undefined and the next lines raise NameError --
    # confirm this branch is guaranteed to run.
    if evenl_left_lengths==evenl_right_lengths:
        evenl_elements = (evenl_left_elements+evenl_right_elements)/2.0
        evenl_lengths = evenl_left_lengths

    # Merge odd- and even-separation data and sort by separation l.
    l_elements = np.real(np.concatenate((oddl_elements,evenl_elements)))
    l_lengths = array(oddl_lengths+evenl_lengths)
    sorted_index = np.argsort(l_lengths)
    ls = l_lengths[sorted_index]
    Cls = l_elements[sorted_index]
    # Remove the (-1)^l alternation and multiply by sqrt(l) (presumably the
    # expected algebraic prefactor -- TODO confirm), then fit ln of the
    # result linearly.
    flipped_Cls = ((-1)**ls)*np.sqrt(ls)*Cls
    shauijian_curve = np.log(flipped_Cls)
    # Hard-coded fit window over the sorted separations; tuned for L=196 --
    # TODO revisit if L changes.
    selected_ls = ls[74:125]
    selected_shauijian_curve = shauijian_curve[74:125]
    polydata = np.polyfit(selected_ls,selected_shauijian_curve,1)
    slope = polydata[0]
    correlation_length = -1.0/slope
    correlation_lengths.append(correlation_length)

correlation_lengths = array(correlation_lengths)
Ds = Paras  # relabel x-axis data with the swept bond dimensions
# Plot correlation length vs bond dimension D and save to pdf.
# NOTE(review): the plt.figure() result is immediately discarded by the
# plt.subplots() call on the next line (one stray empty figure).
fig = plt.figure()
(fig, ax) = plt.subplots(figsize=(4, 3), dpi=300)
ax.plot(Ds,correlation_lengths)
#ax.plot(ls,np.log(flipped_Cls))
##ax.plot(ls,flipped_Cls)
#ax.legend(loc=4,fontsize=6)
ax.set_xlabel('D')
ax.set_ylabel('SzSz correlation length')
#ax.set_ylabel('lnC(l)')
#ax.grid()
#ax.set_title('C(l) of SzSz')
#ax.set_xlim([0,ls[-1]])
##ax.set_yscale('log')
plt.tight_layout() ##solve figure in pdf saved cutted off
##plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
fig.savefig('dema.pdf')


#(Ds, SplusSminus_correlation_matrices) = get_correlation_matrices_v2('MAXSTATES',
# 'custom_SplusSminus', 'custom')
#print(SzSz_correlation_matrices)
#print('Next')
#print(SplusSminus_correlation_matrices)


#(Ds, SzSz_correlation_matrices) = get_correlation_matrices_v2(
#    'MAXSTATES', 'SzSz', 'half_symmetric')
#(Ds, SplusSminus_correlation_matrices) = get_correlation_matrices_v2(
#    'MAXSTATES', 'SplusSminus', 'half_hermitian')
#SminusSplus_correlation_matrices = np.conjugate(SplusSminus_correlation_matrices)
#
#middles = [L // 2 -1] * len(Paras)
#
#def get_middle_data(correlation_matrices):
#    middle_data = []
#    for i in range(len(correlation_matrices)):
#        matrix = correlation_matrices[i]
#        middle = middles[i]
#        middle_data.append(matrix[middle][middle + 1])
#    middle_data = np.array(middle_data)
#    return middle_data
#Sz_corre_middle = get_middle_data(SzSz_correlation_matrices)
#SplusSminus_corre_middle = get_middle_data(SplusSminus_correlation_matrices)
#SminusSplus_corre_middle = get_middle_data(SminusSplus_correlation_matrices)
#
#e0_middle = 1.0/2.0*(SplusSminus_corre_middle+SminusSplus_corre_middle)+Sz_corre_middle
#e0_middle = np.real(e0_middle)
#print(Ds)
#print(e0_middle)



## Works for single- and multi-level runs; fetches any local observable,
## Energy, Var[H] etc. (not Truncated_Weight).
## This only reorganizes the data -- all numbers come from the MPS run.
def get_obervable_v3(propstring, observablestr):  ## for any energy levels
    """Collect *observablestr* as a function of *propstring* across all
    result files.

    Returns (paras, observables); for multi-level runs, observables has
    one row per energy level.  Relies on the module-level global
    result_files.
    """
    import numpy
    from copy import deepcopy
    eigen_measure_obs = pyalps.loadEigenstateMeasurements(
        result_files, what=[observablestr])
    ## Requires every parameter set to have the same number of energy
    ## levels, relying on ALPS ordering them from lowest to highest.
    observables = []
    num_levels = len(eigen_measure_obs[0][0].y)
    for i in range(num_levels):
        # Deep-copy so collectXY sees exactly one level per dataset.
        eigen_measure_obs_i = deepcopy(eigen_measure_obs)
        for j in eigen_measure_obs_i:
            j[0].y = np.array([j[0].y[i]])
        paras_vs_observables_i = pyalps.collectXY(
            eigen_measure_obs_i, x=propstring, y=observablestr)
        paras = paras_vs_observables_i[0].x
        observables_i = paras_vs_observables_i[0].y
        observables.append(observables_i)
    if num_levels == 1:
        observables = observables[0]  # flatten for the common single-level case
    observables = numpy.array(observables)
    return (paras, observables)


#(Ds, energy_variance) = get_obervable_v3('MAXSTATES', 'EnergyVariance')
##print(Ds)
#print('energy_variance is %s'%(energy_variance))

#(Ls, energy_variance) = get_obervable_v3('L', 'EnergyVariance')
#print(Ls)
#print(energy_variance)

#(Ls, Splus_expectation) = get_obervable_v3('L', 'local_magentization')

#(Ls, Sz_expectation) = get_obervable_v3('L', 'local_magentization')
#Sz_expectation = Sz_expectation[0]
#sites = np.arange(Paras[0]) + 1
#print(Sz_expectation)
#
#fig = plt.figure()
#(fig, ax) = plt.subplots(figsize=(4, 3), dpi=300)
#ax.plot(sites, Sz_expectation,'.')
#ax.set_xlabel('chain sites')
#ax.set_ylabel('<Sz(i)>')
#ax.set_title('Sz_total=%s ground Sz(i)' % (Sz_total))
#
##ax.legend(loc=1,fontsize=8)
##ax.set_yscale('log')
#ax.set_xlim([0,Paras[0]])
#ax.set_ylim([-0.08,0.1])
##ax.grid()

#plt.tight_layout() ##solve figure in pdf saved cutted off
#fig.savefig('SPINmdoel_sweeps_vs_energy.pdf')

#(Ls, energy) = get_obervable_v3('L', 'Energy')
#print(Ls)
#print(energy)
#np.savez(
#    'Sz_total_%s_Ls_vs_ground_energy_data' % (Sz_total),
#    Ls=Ls,
#    energy=energy)

### get energy gap
#def get_gap(propstring):
#    (paras,energy) = get_obervable_v3(propstring, 'Energy')
#    assert len(energy)==2, 'Calculated energy levels are not 2.'
#    Egaps = energy[1]-energy[0]
#    return (paras, Egaps)
#
#
#(Ls, energy_gaps) = get_gap('L')
#print(Ls)
#print(energy_gaps)
#np.savez(
#    'maxstates%s_moreLs_vs_gaps_data' % (maxstates),
#    Ls=Ls,
#    energy_gaps=energy_gaps)


## Code to extract the MPS truncation error; a bit long, so kept separate.
def get_truncated_weight(propstring):  ## for any energy levels
    """Collect the maximal TruncatedWeight of the final sweep for every
    result file, as a function of *propstring*.

    Returns (paras, truncated_weights); for multi-level runs, the second
    array has one row per energy level.  Relies on the module-level
    global result_files.
    """
    import numpy
    from copy import deepcopy

    def load_truncated_weight_for_dset_general_level(DataSet):
        ## Core extraction of the truncated weight:
        def load_truncated_weight_for_dset(filename):
            # Look the truncated weight for this DataSet up directly in
            # its h5 archive.
            ar = pyalps.hdf5.archive(filename)
            sweeps = ar.list_children('/spectrum/iteration')
            sweeps = [int(s) for s in sweeps]
            max_sweep = max(sweeps)
            truncated_weight_list = ar['/spectrum/iteration/' + str(max_sweep)
                                       + '/results/TruncatedWeight/mean/value']
            # Worst (largest) truncation recorded in the final sweep.
            truncated_weight = max(truncated_weight_list)
            return truncated_weight

        num_levels = len(DataSet.y)
        truncated_weights = []
        if num_levels > 1:
            for i in range(num_levels):
                # Insert the level index before the file extension --
                # presumably matching the per-level output file naming of
                # mps_optim; TODO confirm against actual output files.
                filename = DataSet.props['filename']
                ff = filename.split('.')
                ff.insert(-1, str(i))
                filename = '.'.join(ff)
                truncated_weight = load_truncated_weight_for_dset(filename)
                truncated_weights.append(truncated_weight)
        else:
            filename = DataSet.props['filename']
            truncated_weight = load_truncated_weight_for_dset(filename)
            truncated_weights.append(truncated_weight)
        truncated_weights = numpy.array(truncated_weights)
        return truncated_weights

    eigen_measure_obs = pyalps.loadEigenstateMeasurements(
        result_files, what=['Energy'])
    # Overwrite each Energy dataset's y with the truncated weights so the
    # collectXY machinery below can be reused unchanged.
    for i in eigen_measure_obs:
        DataSet = i[0]
        truncated_weights = load_truncated_weight_for_dset_general_level(
            DataSet)
        i[0].y = truncated_weights
        i[0].props['observable'] = 'Truncated_Weight'

    ## Requires every parameter set to have the same number of energy
    ## levels, relying on ALPS ordering them from lowest to highest.
    truncated_weights = []
    num_levels = len(eigen_measure_obs[0][0].y)
    for i in range(num_levels):
        # Deep-copy so collectXY sees exactly one level per dataset.
        eigen_measure_obs_i = deepcopy(eigen_measure_obs)
        for j in eigen_measure_obs_i:
            j[0].y = np.array([j[0].y[i]])
        paras_vs_observables_i = pyalps.collectXY(
            eigen_measure_obs_i, x=propstring, y='Truncated_Weight')
        paras = paras_vs_observables_i[0].x
        observables_i = paras_vs_observables_i[0].y
        truncated_weights.append(observables_i)
    if num_levels == 1:
        truncated_weights = truncated_weights[0]
    truncated_weights = numpy.array(truncated_weights)
    return (paras, truncated_weights)


#(maxstates, truncation_error) = get_truncated_weight('MAXSTATES')
#print(maxstates)
#print(truncation_error)

## 画图代码备用
#fig = plt.figure()
#(fig, ax) = plt.subplots(figsize=(4, 3), dpi=300)
#for i in range(len(result_files)): # 多个curve自动上色
#    ax.plot(xdatas[i], ydatas[i]-ydatas[i][-1], label='L=%s'%(label_list[i]))
#ax.legend(loc=1,fontsize=8)
#ax.set_xlabel('MPS sweeps')
#ax.set_ylabel('Energy Difference with final')
#ax.set_yscale('log')
##ax.set_ylim([1e-8,1e2])
#ax.grid()
#ax.set_title('SPINmodel MPS convergence')
#plt.tight_layout() ##solve figure in pdf saved cutted off
#fig.savefig('SPINmdoel_sweeps_vs_energy.pdf')

##万丈高楼平地起，附上v1代码：
#def get_obervable_v1(propstring, observablestr):  ##for 1 energy level
#    #从结果文件中extract观测量随参数的变化数据,依赖全局变量result_files
#    eigen_measure_obs = pyalps.loadEigenstateMeasurements(
#        result_files, what=[observablestr])
#    paras_vs_observables = pyalps.collectXY(
#        eigen_measure_obs, x=propstring, y=observablestr)
#    paras = paras_vs_observables[0].x
#    observables = paras_vs_observables[0].y
#    return (paras, observables)
###############################################################
