#!/usr/bin/env python
# -*- coding: utf-8 -*-
#/*************************************************************************
#    > File Name: Lib/MG_Operation.py
#    > Author: Yan Wang
#    > Mail: wangyan@imnu.edu.cn
#    > Created Time: 2022-05-26 (Thursday) 15:22:32
# ************************************************************************/
from File_Operation import *
from Content_Operation import *
from List_Operation import *
from Folder_Operation import *
from Value_Calculation import *
from ROOT_Operation import *
from Parallel_Operation import *

# Manually split the parameters into several groups and store them in a file.
def Split_Param_with_Num(df, used_map, out_file, node_num):
	"""
	Split each column of *df* named in *used_map* into *node_num* sublists,
	write one comma-joined line per column into *out_file*, and return the
	mapping {column name -> list of sublists}.
	"""
	Create_Folder_from_FileDir(out_file)   # strip the file name from out_file, keep (and create) the directory
	all_split_list={}
	# "with" guarantees the file is closed even if an exception is raised.
	with open(out_file,'w') as f:
		for key in used_map:
			data_list=df[key].to_list()   # column -> plain python list
			split_list = Split_List_n_List(data_list, node_num)   # split data_list into node_num groups
			all_split_list[key]=split_list
			f.write(", ".join(map(str,split_list)))   # one comma-joined line per column
			f.write("\n")
	print("In Split_Param_with_Num, the output param file is stored in ", out_file)
	return(all_split_list)

# Generate multiple MG param files: one file per parameter point.
def Generate_MG_ParamCard_from_DF(input_para_df,source,replaced_content,labels, output_general_file, df_lines=[]):
	"""
	Copy *source* once per parameter point and substitute the parameter
	values from *input_para_df* into each copy.  The parameters listed in
	*labels* must be consistent with *replaced_content*.

	Returns the list of generated file names, or None on a length mismatch.
	"""
	# labels maps a marker line to the number of lines replaced below it;
	# the total must equal the number of entries in replaced_content.
	label_count=0
	for keys in labels:
		label_count += labels[keys]

	# BUGFIX: use != for value comparison; "is not" compares object identity
	# and is only coincidentally correct for small cached integers.
	if len(replaced_content) != label_count:
		print("Error in Generate_MG_ParamCard_from_DF:  input_dict length is in-consistent with labels line numbers")
		return(None)
	if not df_lines:
		df_lines=len(input_para_df)
	# Copy para_card.dat into as many files as there are parameter rows.
	replaced_key=Get_Maplist_Key(replaced_content)   # extract the keys of replaced_content
	file_name_list=Copy_Multi_File(df_lines,source,output_general_file)    # copy source into output_general_file, numbering the copies
	param_list_all = Mapping_DF_to_Sentence(input_para_df,replaced_key,replaced_content)    # build the replacement sentences from the dataframe
	# Find the lines to replace and apply the replacements.
	Replace_Multi_File(labels,file_name_list,source,param_list_all)
	return(file_name_list)



# Automatically generate several scan param files.
# Similar to Generate_MG_ParamCard_from_DF but takes a dict of lists, so each
# output file can hold several parameter groups -- used for parameter scans.
def Generate_MG_Scan_ParamCard_from_Dict(input_dict,source,replaced_content, labels, output_general_file, df_lines=[]):
	"""
	Copy *source* once per group in *input_dict* and substitute the scan
	parameter values into each copy.  The parameters in *input_dict* must be
	consistent with *replaced_content* and with *labels*.

	Returns the list of generated file names, or None on a length mismatch.
	"""
	# BUGFIX: use != for value comparison; "is not" compares object identity
	# and is only coincidentally correct for small cached integers.
	if len(input_dict) != len(replaced_content):
		print("Error in Generate_MG_Scan_ParamCard_from_Dict:  input_dict length is in-consistent with replaced_content")
		return(None)
	# labels maps a marker line to the number of lines replaced below it.
	label_count=0
	for keys in labels:
		label_count += labels[keys]

	if len(input_dict) != label_count:
		print("Error in Generate_MG_Scan_ParamCard_from_Dict:  input_dict length is in-consistent with labels line numbers")
		return(None)
	if not df_lines:
		df_lines=len(input_dict)
	replaced_key=Get_Maplist_Key(replaced_content)   # extract the keys of replaced_content
	file_name_list=Copy_Multi_File(df_lines,source,output_general_file)  # copy the base file, numbering the copies
	param_list_all = Mapping_DictList_to_Sentence(input_dict,replaced_key,replaced_content,df_lines)   # build the replacement sentences per group
	# Find the lines to replace and apply the replacements per group.
	Replace_Multi_File(labels,file_name_list,source,param_list_all)
	return(file_name_list)

# Back up duplicated files, keeping only the newest copy of each kind.
def Cover_Duplicated_File(pattern,folder):
	"""
	For every file pattern in *pattern*, find the matching files in *folder*
	and move all but the last one (after time-sorting) into a "bk" sub-folder.

	Assumes Sort_Files_by_Time returns oldest-to-newest, so the last entry is
	the newest and is kept -- TODO confirm against Sort_Files_by_Time.
	"""
	bk_folder=os.path.join(folder,"bk")   # backup folder path
	Create_Folder(bk_folder,"/")   # make sure the backup folder exists
	for key in pattern:
		files=Find_File(folder, key)   # files in folder matching this pattern
		sorted_files=Sort_Files_by_Time(files)
		# BUGFIX: the original returned as soon as matches WERE found, which
		# made the move loop below unreachable; instead skip to the next
		# pattern only when there is nothing to de-duplicate.
		if not sorted_files:
			continue
		for count,file in enumerate(sorted_files):
			if(count!=len(sorted_files)-1):   # keep the last (newest) file
				Move_File(file,bk_folder)
			



# Tidy one "run" folder inside an MG-generated Events folder.
def Tidy_MG_Run_Folder(folder_name,num):
	"""Keep only the newest copy of each MG output file kind, then renumber
	the run files to *num* and normalise every tag index to tag_1."""
	mg_file_category=["banner","shower","delphes_events","delphes\.log","djrs","pts","pythia8\.cmd","pythia8_events","pythia8\.log","unweighted_events"]
	# Drop stale duplicates left behind by failed generations.
	Cover_Duplicated_File(mg_file_category,folder_name)
	# Only one file of each kind remains; renumber the banner file so its
	# index (e.g. the 03 in run_03_tag_1_banner.dat) matches the new folder.
	banner_files=Find_File(folder_name,"banner")
	Rename_Series_Files(num,os.path.join(folder_name,banner_files[0]))
	# Rewrite every tag index to tag_1.
	Multi_Rename_Series_Files("tag_\d+",folder_name,"tag_1",count=0)



# Tidy every run_* sub-folder of an MG-generated Events folder.
def Tidy_MG_Events_Folder(path):
	"""Walk *path* and tidy each sub-folder whose name starts with "run",
	passing its zero-padded two-digit index to Tidy_MG_Run_Folder."""
	for entry in Path(path).iterdir():
		prefix=os.path.basename(entry).split("_",2)[0]
		if prefix != "run":
			continue
		# second name component is the run index, e.g. run_3 -> "03"
		run_index=int(entry.name.split("_",2)[1])
		Tidy_MG_Run_Folder(entry,str(run_index).zfill(2))




# Compute the move of MG-generated run folders from original_folder into
# target_folder, renaming each run_0x sub-folder so its index continues the
# sequence already present in the target.
def Move_MG_Event(original_folder,target_folder=""):
	"""
	Tidy *original_folder* and, for every run sub-folder, compute the new
	(renumbered) path under *target_folder*.  Returns the list of new paths
	as Path objects (the actual Move_File call is currently disabled).
	"""
	if original_folder == target_folder:
		return(original_folder)
	Tidy_MG_Events_Folder(original_folder)
	# Count run folders already in the target so new indices continue from there.
	ori_maxnum=len(Find_File(target_folder,"run","folder"))
	name_list=[]
	for sub_folder in sorted(Path(original_folder).iterdir()):
		if not sub_folder.is_dir():
			continue
		# next index = current maximum + 1, zero-padded to two digits
		ori_maxnum=ori_maxnum+1
		new_num=str(ori_maxnum).rjust(2,"0")
		Tidy_MG_Run_Folder(sub_folder,new_num)
		# Build the new folder name so the numbering continues seamlessly.
		if target_folder != "":
			com_name=os.path.basename(sub_folder).split("_",2)[0]   # common prefix, e.g. "run"
			new_name=os.path.join(target_folder,com_name+"_"+new_num)
			name_list.append(Path(new_name))
			# BUGFIX: compare as strings -- a Path object never equals a str,
			# so the original condition was always True.
			if str(sub_folder) != new_name:
				print(sub_folder,new_name)
				#Move_File(sub_folder, new_name)    # move sub_folder to new_name (disabled)
		else:
			name_list.append(Path(str(sub_folder)))
	return(name_list)


# Return the names of the per-parameter-point listing files, one per benchmark
# point.  Each listing file contains the names of all event files generated
# for that parameter point.
def Move_MG_Event_MultiRun_for_one_BP(original_folder,target_folder=""):
	"""
	For every numbered sub-folder of *original_folder* (index "0" skipped),
	move/renumber its Events runs via Move_MG_Event and write one listing
	file with the resulting delphes event file paths.  Returns the listing
	file names.
	"""
	out_files=[]
	for sub_folder in sorted(Path(original_folder).iterdir()):
		new_folder_name=Get_File_Name(sub_folder)
		folder_num=new_folder_name.split("_")[-1]   # trailing index of the folder name
		if folder_num == "0":
			continue
		event_folder=os.path.join(sub_folder,"Events")
		if target_folder != "":
			target_sub_folder=target_folder+new_folder_name
			Create_Folder(target_sub_folder,"/")
			new_files=Move_MG_Event(event_folder,target_sub_folder)
			out_file=target_sub_folder+"/"+new_folder_name+"_moved_event_files.dat"
		else:
			new_files=Move_MG_Event(event_folder,target_folder)
			out_file=original_folder+"/"+new_folder_name+"/delphes_event_files.dat"
		out_files.append(out_file)
		# BUGFIX: Move_MG_Event returns Path objects and Path + str raises
		# TypeError, so convert explicitly; "with" also guarantees the file
		# is closed on error.
		with open(out_file,'w') as f:
			for file in new_files:
				f.write(str(file)+"/tag_1_delphes_events.root")
				f.write("\n")
	return(out_files)

# Drives the complete generation of MG scan simulation data.
# The para dict is defined as:
# para["original_folder"]
#     ["split_event_folder"],
#     ["replace_value"],
#     ["label_value"],
#     ["data_df"],
#     ["node_num"],
#     ["node_name"],
#     ["length"],
#     ["source"],
#     ["general_file_name"],
#     ["run_card"],
#     ["delphes_card"],
	# The MG simulation folder is copied node_num times, as set by the split.

def Select_Para(para, lines):
	"""
	Restrict the scan configuration *para* to the parameter points listed in
	*lines* (1-based row numbers) and update para["folder_num"] accordingly.

	Mutates *para* in place and returns it.  The split_* entries become
	numpy arrays after selection.
	"""
	idx=np.array(lines)-1   # hoisted: 1-based line numbers -> 0-based indices
	para["data_df"]=para["data_df"].loc[idx].reset_index(drop=True)
	para["split_event_folder"]=np.array(para["split_event_folder"])[idx]
	para["split_scan_file"]=np.array(para["split_scan_file"])[idx]
	para["split_event_folder_nocut"]=np.array(para["split_event_folder_nocut"])[idx]
	para["split_scan_file_nocut"]=np.array(para["split_scan_file_nocut"])[idx]
	para["folder_num"]=len(lines)
	return(para)

def Generate_MG_Events(original_folder,split_folder, para, mg_event_method, has_event=False,df_lines=[]):
	"""
	Run the full MG event-generation step for a parameter scan.

	Copies the base MG process folder *original_folder* into every folder in
	*split_folder*, generates one param card per folder (mode selected by
	*mg_event_method*: "scan", "seperate", "slurm" or "pbs"), copies the
	cards into each folder's Cards/ directory (unless *has_event*), and then
	launches MG via SSH, Slurm or PBS.

	Keys of *para* read here: replace_value, label_value, data_df, node_num,
	node_name, length, source, general_file_name, run_card, delphes_card,
	folder_num.  Returns an empty tuple.
	"""
#def Generate_MG_Scan_Events(original_folder,split_event_folder, param_replace_value,param_label_value, param_df, node_num, node_name, param_length, param_source, param_general_file_name, run_card, delphes_card):
	# Copy the MG simulation folder node_num times, as defined by the split.
	# First check that the base folder exists.
	if not os.path.exists(original_folder):
		print("Error: In MG step: there is no MG original folder")
		# NOTE(review): only warns -- execution continues even when the base
		# folder is missing; confirm whether this should abort instead.

	print("The based MG process is :", original_folder)
	for file_name in split_folder:
		if not os.path.exists(file_name):
			# copy the base process folder into this split folder
			Copy_File(original_folder,file_name)
		else:
			event_folder=file_name+"/Events"
			if not os.path.exists(event_folder):
				Create_Folder(event_folder,"/")   # make sure an Events folder exists
	print("It has been copied into following folders:", split_folder)


	# Generate the MG param (scan) files; they are copied into the Cards/
	# directory of each split folder further below.
	print("Generate param files.")
	if mg_event_method == "scan":
		input_value_list=Get_Maplist_Key(para["replace_value"])   # names of the parameters to replace
		input_dict=Make_DictList_from_DF(para["data_df"], para["node_num"], input_value_list, para["length"])   # group the parameter points per node
		param_file_list=Generate_MG_Scan_ParamCard_from_Dict(input_dict, para["source"], para["replace_value"], para["label_value"], para["general_file_name"],df_lines)    # one scan card per node
	elif mg_event_method == "seperate" or mg_event_method == "slurm" or mg_event_method == "pbs":
		param_file_list=Generate_MG_ParamCard_from_DF(para["data_df"], para["source"], para["replace_value"], para["label_value"], para["general_file_name"],df_lines)
	else:
		print("In Generate_MG_Events, wrong mg_event_method, please a correct value. The current value is ", mg_event_method)
		return
	print("The param files are stored in ", param_file_list)


	# Copy the generated cards into every run folder (skipped when the
	# events already exist).
	if not has_event:
		print("Copy param files into MG event folders.")
		for num in range(para["folder_num"]):
			Copy_File(param_file_list[num],split_folder[num]+"/Cards/param_card.dat")
			Copy_File(para["run_card"],    split_folder[num]+"/Cards/run_card.dat")
			Copy_File(para["delphes_card"],split_folder[num]+"/Cards/delphes_card.dat")

	# Run MG on each node.  The command list holds one per-node ssh command;
	# its length equals the node count and the number of copied folders.
	if mg_event_method == "scan":
		command=[]
		for index_f, file_name in enumerate(split_folder):
			ssh_para={}
			ssh_para["node_name"]=para["node_name"][index_f]
			ssh_para["command"]="source ~/.zshrc ; {fdir}/bin/generate_events -f".format(fdir=file_name)
			command.append(ssh_para)
		print("Setting mg running command: ", ssh_para)
	# run MG on every node in parallel
		print("Begin Parallel running for each mg folder.")
		#Parallel_Running(Run_Command_with_SSH,command,para["record_file"])
		Parallel_Running(Run_Command_with_SSH,command)
	elif mg_event_method == "seperate":
		# split the folders into batches of at most node_num folders each
		if para["folder_num"]%para["node_num"] == 0:
			batch_num=para["folder_num"]//para["node_num"]
		else:
			batch_num=para["folder_num"]//para["node_num"]+1



		folder_batch = Split_List_n_List(split_folder, batch_num)
		print("The folder_batch are ", folder_batch)
		for folder_list in folder_batch:
			command=[]
			for index_f, file_name in enumerate(folder_list):
				ssh_para={}
				ssh_para["node_name"]=para["node_name"][index_f]
				#ssh_para["command"]="source ~/.zshrc ; {fdir}/bin/generate_events -f".format(fdir=file_name)
				# NOTE(review): the real generate_events command above is
				# commented out; "echo yes" looks like a leftover dry-run
				# stub -- confirm before relying on this branch.
				ssh_para["command"]="echo yes"
				command.append(ssh_para)
			print("Setting mg running command: ", ssh_para)
			# run MG on every node of this batch in parallel
			print("Begin Parallel running for each mg folder.")
				#Parallel_Running(Run_Command_with_SSH,command,para["record_file"])
			Parallel_Running(Run_Command_with_SSH,command)
	elif mg_event_method == "slurm":
		param={}
		param["command"]="sbatch {}/run_slurm.pbs"
		param["folder"]=split_folder
		Slurm_Running(param)
	elif mg_event_method == "pbs":
		param={}
		param["command"]="qsub {}/run_pbs.pbs"
		param["folder"]=split_folder
		PBS_Running(param)
	else:
		print("In Generate_MG_Events, wrong mg_event_method, please a correct value. The current value is ", mg_event_method)


	return()




# Extract the per-generation event numbers from an auto-scan MG folder by
# counting the entries of every .root file it contains.
def Extract_Scan_Generated_Event_Number(target_folder_name):
	"""Return a list with the total entry count of each .root file found
	directly inside *target_folder_name*."""
	counts=[]
	for entry in Path(target_folder_name).iterdir():
		if entry.suffix != ".root":
			continue
		counts.append(ROOT_Get_AllEntriesNumber(os.path.join(target_folder_name,entry)))
	return(counts)


# Extract the scan parameters from an auto-generated MG folder.  (MG sometimes
# crashes mid-run, so the summarising scan file is not always produced.)
def Extract_Scan_Xection(folder_name,output_general_parafile,MGlabel,has_event=True,has_delphes=False):
	"""
	Walk every run folder under <folder_name>/Events/, read the integrated
	cross section and the scanned parameter values out of the run files, and
	write one row per run into the per-node parameter file.

	Returns (para_in_one_node, each_event_folder, output_parafile); the list
	is empty when *has_event* is True or *MGlabel* is invalid.
	"""
	each_event_folder=folder_name+"/Events/"
	print("In Extract_Scan_Xection: now extract info in ", each_event_folder)
	para_in_one_node=[]
	output_parafile=output_general_parafile.format(Path(folder_name).name)
	folder_list=Sort_Files_by_Name(os.listdir(each_event_folder))
	if has_event:
		return(para_in_one_node, each_event_folder,output_parafile)
	# The label lines differ between "seperate" and "scan" param cards.
	# Hoisted out of the loop: the table does not depend on the run folder.
	if MGlabel == "seperate":
		MGparam_label_value={"Block frblock":2,"23 9.118760e+01 # MZ":4}
	elif MGlabel == "scan":
		MGparam_label_value={"BLOCK FRBLOCK":2,"23 9.118760e+01 # mz":4}
	else:
		print("ERROR: in Extract_Scan_Xection, Please set a correct MGlabel, which should consistent with param_mode in setting. The current one is: ", MGlabel)
		# BUGFIX: the original fell through here and crashed with a NameError
		# on MGparam_label_value; return the standard (empty) tuple instead.
		return(para_in_one_node, each_event_folder,output_parafile)
	# Map MG parameter names to the column names used in the param files
	# (replaces the original no-op if/elif chain); unknown names pass through.
	rename_map={"TB":"tb","sinbma":"sba","mh1":"mh","mh2":"mH","mh3":"mA","mhc":"mch"}
	for run_folder in folder_list:
		one_para={}
		one_para["#run_name"]=each_event_folder+run_folder
		if not Path(one_para["#run_name"]).is_dir():
			continue
		label="#  Integrated weight (pb)  :"
		# By default only one file (run_xx_tag_file.dat) per folder has the label.
		xection_word_list=Extract_One_Sentence_From_MultiFile(one_para["#run_name"],label)
		cross=[word.split(":",2)[1] for word in xection_word_list[1]]
		log_file=xection_word_list[0][0]
		word_list, input_line_list = Find_Line(log_file, MGparam_label_value)   # locate the label lines and read the lines after them
		for sentence in word_list:
			split_sentence=list(filter(lambda x : x, sentence.split(" ")))   # drop empty tokens
			value=split_sentence[1]
			para_name=split_sentence[-1].replace("\n","")
			one_para[rename_map.get(para_name,para_name)]=value
		one_para["cross"]=cross[0]
		if has_delphes:
			number_list=Extract_Scan_Generated_Event_Number(one_para["#run_name"])
			one_para["EventNo"]=number_list[0]

		para_in_one_node.append(one_para)
	Write_List_by_DF_to_File(para_in_one_node,output_parafile)   # write the rows to the .csv output file
	return(para_in_one_node, each_event_folder,output_parafile)



# Use Extract_Scan_Xection on every MG event folder to pull the cross-section
# data out of the generated Event files and assemble the scan parameter files.
def Extract_MultiXection_from_Folder(input_folders, output_general_file, scan_file, MGlabel, has_event=True,has_delphes=False):
	"""For each folder, extract its scan info and copy the resulting
	parameter file next to that folder's Events directory.  Returns three
	parallel lists: (extracted rows, output param files, Events folders)."""
	print("Extract scan param from MG run folder")
	all_cross=[]
	para_output_folder=[]
	input_data_folder=[]
	print("The scaned param file number is ",len(input_folders))
	for sub_folder in input_folders:
		# one Extract_Scan_Xection call per node folder
		node_para,node_input,node_output=Extract_Scan_Xection(sub_folder,output_general_file,MGlabel,has_event,has_delphes)
		all_cross.append(node_para)
		input_data_folder.append(node_input)
		para_output_folder.append(node_output)
		# drop the extracted scan file into the matching Events folder
		src=output_general_file.format(Path(sub_folder).name)
		dst=sub_folder+"/Events/"+scan_file.format(fnum=len(node_para))
		Copy_File(src,dst)
	return(all_cross, para_output_folder, input_data_folder)


# Load both the originally requested parameter file and the parameter files
# extracted above, so missing benchmark points can be reported later (MG
# sometimes aborts before finishing every point).
def Dataframe_One_with_Multi_Param_Files(one_input_file, multi_input_files, para_name):
	"""Return (origin_df, generated_df): the original parameter table and the
	list of extracted tables, each restricted to the *para_name* columns."""
	print("Load origin param, one_input_file", one_input_file)
	print("Load origin param, para_name", para_name)
	origin_df=Load_THDM_Param(one_input_file,para_name)   # read the para_name columns of one_input_file
	# BUGFIX: this line was indented with spaces inside a tab-indented file,
	# which raises TabError when the module is imported.
	print("origin_df", origin_df)

	# The extracted params already sit in the output folders; pull the needed columns.
	print("Load scaned param")
	generated_df=[]
	for this_file in multi_input_files:
		print("file_name",this_file)
		generated_df.append(Load_THDM_Param(this_file,para_name))
	return(origin_df, generated_df)



# Compare the two groups of parameter tables and report where they differ.
def Compare_One_with_Multi_Param_DF(one_input_df, multi_input_df):
	"""Slice *one_input_df* into consecutive chunks matching the length of
	each df in *multi_input_df* and compare chunk-by-chunk with datacompy
	(abs_tol=0.01, joined on 'mH').  Returns (any_difference, diff_objects)."""
	print("In Compare_One_with_Multi_Param_DF, Compare origin param file and extract param file")
	Judge_different=False
	differents=[]
	offset=0
	for index_df, this_df in enumerate(multi_input_df):
		begin_num, end_num = offset, offset+len(this_df)
		offset=end_num
		print("In Compare_One_with_Multi_Param_DF "," the selected rows are from", begin_num, " to ", end_num)
		# re-index the slice so its rows align with this_df from 0
		split_df=one_input_df[begin_num:end_num].reset_index(drop=True)
		print("this_df",this_df)
		print("split_df",split_df)
		different_part=datacompy.Compare(this_df,split_df,abs_tol=0.01,join_columns=['mH'])
		if not different_part.matches():
			Judge_different=True
			print("The different part is in ", index_df+1)
			print("The different content is ", different_part)
			differents.append(different_part)
	return(Judge_different,differents)


# Merge several parameter files into one combined file, which is later placed
# into the matching MG folder.
def Pack_Scan_Param(combined_file, output_folder):
	"""Concatenate every parameter file listed in *output_folder*, write the
	result to *combined_file* (formatted with the row count) and return the
	written file name."""
	frames=[]
	for para_file in output_folder:
		print("file_name",para_file)
		frames.append(Load_THDM_Param(para_file))
	merged=pd.concat(frames).reset_index(drop=True)   # stack the tables and renumber the rows from 0
	this_combined_file=combined_file.format(fnum=len(merged))
	merged.to_csv(this_combined_file)
	print("The combined scan para files are in ", this_combined_file)
	return(this_combined_file)


# Glue the functions above together: read and collect the scan results.
def Collect_MG_Cross(split_event_folder,combined_file, param_out_file, output_general_file, original_parafile, scan_file, para_name, MGlabel, has_event=True, has_delphes=False):
	"""
	Extract the cross sections from every folder in *split_event_folder*,
	reload the per-node parameter files, merge them into one combined file
	and copy that file to *param_out_file*.

	Returns (all_cross, para_output_folder, input_data_folder) as produced
	by Extract_MultiXection_from_Folder.
	"""
	Create_Folder_from_FileDir(combined_file,"/")
	all_cross, para_output_folder, input_data_folder=Extract_MultiXection_from_Folder(split_event_folder, output_general_file, scan_file, MGlabel, has_event, has_delphes) # extract the cross sections (with scan params) from split_event_folder into output_general_file
	print("In Collect_MG_Cross, the all_cross is ", all_cross)
	# NOTE(review): the next line prints all_cross again although the message
	# says "run event number" -- looks like a copy/paste slip; confirm intent.
	print("In Collect_MG_Cross, the run event number is ", all_cross)
	for cross_i, cross_info in enumerate (all_cross):
		print("In ",cross_i, "-th event, run number is ", len(cross_info))
	print("In Collect_MG_Cross, the para_output_folder is ", para_output_folder)
	print("In Collect_MG_Cross, the input_data_folder is ", input_data_folder)
	origin_df, generated_df=Dataframe_One_with_Multi_Param_Files(original_parafile, para_output_folder, para_name)    # reload the original and the extracted parameter tables (columns in para_name)
	#Judge_different, differents=Compare_One_with_Multi_Param_DF(origin_df, generated_df)  # compare the files and report benchmark points that were never run
	#print("Two param files are different, still have some BPs are not generated, please check:",output_general_file)
	#if not Judge_different :
	#	print("Two param files are different, still have some BPs are not generated, please check:",output_general_parafile)
	Pack_Scan_Param(combined_file, para_output_folder) # merge the parameter files
	Copy_File(combined_file.format(fnum=len(origin_df)),param_out_file)
	return(all_cross, para_output_folder, input_data_folder)


