import xml.etree.ElementTree as ET
from moviepy.editor import *
from moviepy.audio.AudioClip import AudioArrayClip
import numpy as np
import librosa

import os
import sys

"""
修改vclip->增加区段变速功能 {完成-未验证}
添加effect {未完成}
添加bgm {思考中}
添加片头 {未完成}
"""

FONT = 'Microsoft-YaHei-&-Microsoft-YaHei-UI'	# ImageMagick font name used for all subtitle TextClips
text_buf = {}	# cache: subtitle string -> TextClip, reused across scenes


def mySpeedx(vclip, speedx=1.0, final_duration = None):
	"""Change the playback speed of *vclip* while time-stretching (not pitch-shifting) its audio.

	speedx > 1 speeds up, < 1 slows down. If *final_duration* is given it
	overrides *speedx* so the result lasts exactly that long.
	Raises for audio with more than 2 channels.
	"""
	if final_duration:
		speedx = vclip.duration / final_duration*1.0
	if speedx == 1.0:
		return vclip
	if vclip.audio:
		audio = vclip.audio
		# re-speed the video track alone; the audio is rebuilt below via librosa
		vclip_v = vclip.set_audio(None).speedx(speedx)
		sr = audio.fps
		y = audio.to_soundarray()
		if y.ndim == 2:
			# stereo: stretch each channel independently, then re-interleave
			yl,yr = y[:,0],y[:,1]
			# rate= keyword: required by librosa >= 0.10, accepted by older versions
			nyl = librosa.effects.time_stretch(yl, rate=speedx*1.0)
			nyr = librosa.effects.time_stretch(yr, rate=speedx*1.0)
			ny = np.vstack([nyl,nyr]).T
			vclip_a = AudioArrayClip(ny,sr).set_duration(vclip_v.duration)
		elif y.ndim == 1:	# FIX: was `y.nidm`, an AttributeError on mono audio
			ny = librosa.effects.time_stretch(y, rate=speedx*1.0)
			vclip_a = AudioArrayClip(ny,sr).set_duration(vclip_v.duration)
		else:
			raise Exception("audio channals num > 2")
		return vclip_v.set_audio(vclip_a)
	else:
		return vclip.speedx(speedx)

def set_text(s, dur):
	"""Return a subtitle TextClip for *s* lasting *dur* seconds, or None for "".

	Clips are cached per string in text_buf; cache hits are re-stamped with
	the requested duration.
	"""
	if s == "":
		return None
	cached = text_buf.get(s)
	if cached is not None:
		return cached.set_duration(dur)
	tclip = TextClip(s, fontsize=36, kerning=2, font=FONT, color='white')
	tclip = tclip.set_opacity(0.95).set_duration(dur)
	tclip = tclip.on_color(color=(0,0,0), col_opacity=0.35)
	text_buf[s] = tclip
	return tclip

def set_tclip_pos(tclip, pos=(1048,916)):
	"""Place *tclip* at *pos*; the default is the subtitle spot on a 1920x1080 frame."""
	return tclip.set_position(pos)

def get_para(attrib, key, default_value):
	"""Return attrib[key], or *default_value* when the key is absent.

	Thin wrapper over dict.get so call sites read as "get XML attribute
	with default" (attrib is an Element.attrib mapping at every caller).
	"""
	return attrib.get(key, default_value)

def apply_volumex(audio, spx):
	"""Duck a source clip's own audio to a fixed low level (0.33).

	*spx* (the speed factor) is accepted for interface compatibility but is
	currently unused; an earlier revision scaled the volume by it.
	Returns None when *audio* is absent/falsy.
	"""
	if not audio:
		return None
	return audio.volumex(0.33)

volumex_map = {}	# bgm_id -> (last target volume, playback position inside the bgm source)
BGM_VOL_VOICE = 0.2	# bgm level while dubbed voice-over is playing
BGM_VOL_ASIDE = 0.8	# bgm level during aside / no-voice sections
BGM_VOL_SWITH_TIME = 0.5	# seconds over which the bgm fades between the two levels
def volumex_bgm(bgm_id, v_dur, a_dur):
	"""Cut *v_dur* seconds of looping bgm for one clip, volume-faded per segment.

	The first *a_dur* seconds (voice-over portion) fade toward BGM_VOL_VOICE,
	the remainder toward BGM_VOL_ASIDE. Per-bgm state (last volume, loop
	position) persists across calls in volumex_map so consecutive clips
	continue the track seamlessly.

	Requires bgm_map[bgm_id] == (AudioFileClip, start_sec, end_sec);
	NOTE(review): bgm_map is only populated by the currently commented-out
	bgmList parser, so this path is effectively disabled — verify before use.
	"""
	bgm,start,end = bgm_map[bgm_id]
	if bgm_id in volumex_map:
		vol, stamp = volumex_map[bgm_id]
	else:
		vol, stamp = 0, start
	def split_bgm(bgm, stamp, dur):
		# Take `dur` seconds starting at `stamp`, looping back to `start`
		# whenever the [start, end) window is exhausted.
		_bgms = []
		while dur > 0:
			if stamp + dur <= end:
				_bgm = bgm.subclip(stamp,stamp+dur)
				_bgms.append(_bgm)
				stamp = stamp + dur
				dur = 0
			else:
				_bgm = bgm.subclip(stamp,end)
				_bgms.append(_bgm)
				dur -= end - stamp
				stamp = start
		return concatenate_audioclips(_bgms), stamp
	bgm_buf = []
	
	tar_vol = 0
	tar_dur = BGM_VOL_SWITH_TIME
	def fadein(gf,t):
		# Frame filter: ramp gain from `vol` to `tar_vol` over `tar_dur` seconds.
		# NOTE(review): this closure reads tar_vol/tar_dur late-bindingly; since
		# moviepy evaluates fl() lazily at render time, both appended segments
		# may see the LAST-assigned values — confirm this is intended.
		gft = gf(t)
		if np.isscalar(t):
			factor = tar_vol*t/tar_dur + vol*(tar_dur-t)/tar_dur if t < tar_dur else tar_vol
			factor = np.array([factor,factor])
		else:
			factor = np.clip(tar_vol*t/tar_dur + vol*(tar_dur-t)/tar_dur,min(vol,tar_vol),max(vol,tar_vol))
			factor = np.vstack([factor,factor]).T
		return factor * gft
	# breakpoint()
	if a_dur > 0:
		# voice-over portion: duck the bgm under the dub
		a_bgm, stamp = split_bgm(bgm, stamp, a_dur)
		tar_vol = BGM_VOL_VOICE
		tar_dur = min(BGM_VOL_SWITH_TIME,a_bgm.duration / 2)
		bgm_buf.append(a_bgm.fl(fadein,keep_duration=True))
	if v_dur > a_dur:
		# remaining video time: raise the bgm back up
		b_bgm, stamp = split_bgm(bgm, stamp, v_dur-a_dur)
		tar_vol = BGM_VOL_ASIDE
		tar_dur = min(BGM_VOL_SWITH_TIME,b_bgm.duration / 2)
		bgm_buf.append(b_bgm.fl(fadein,keep_duration=True))
	volumex_map[bgm_id] = (tar_vol, stamp)
	return concatenate_audioclips(bgm_buf)

img_buf = {}	# cache: image path -> ImageClip
# return ImageClip(Composited)
def parse_imageList(node, only_check=False):
	"""Parse an <imageList> node into one composited still clip.

	Each <image> child needs src plus an optional para of (x,y) or
	(x,y,w,h). With only_check=True just validates that every src exists.
	Returns a CompositeVideoClip, or None if nothing was built.
	Raises on unknown tags, bad para arity, or (only_check) missing files.
	"""
	assert node.tag == "imageList"
	iclips = []
	for cnode in node:
		if cnode.tag == 'image':
			para,src = eval(get_para(cnode.attrib,'para','(0,0)')), cnode.attrib['src']
			if not only_check:
				# FIX: setdefault(src, ImageClip(src)) eagerly built an ImageClip
				# on EVERY call, defeating the cache; construct only on a miss.
				if src not in img_buf:
					img_buf[src] = ImageClip(src)
				iclip = img_buf[src]
				if len(para) == 2:
					iclip = iclip.set_position(para)
				elif len(para) == 4:
					iclip = iclip.set_position(para[0:2]).resize(para[2:])
				else:
					raise Exception(f"parse_imageList#{cnode.tag}->para={para} parameter error")
			else:
				if not os.path.exists(src):
					raise Exception(f"parse_imageList#src={src} not exist")
				else:
					iclip = None
		else:
			raise Exception(f"parse_imageList#{cnode.tag} tag error")
		if iclip:
			iclips.append(iclip)
	return CompositeVideoClip(iclips) if len(iclips) else None

ICLIP_SPX = None	# sentinel speed factor recorded for image/freeze clips (no meaningful speedx)
# return (anchor, iclip, aclip, tclip, efts, bgm, spx=None), stamp
def parse_iclip(node, audio, stamp=0, afps=44100, bgm="", only_check=False):
	"""Parse one <iclip> node (a still-image slide with mandatory dub).

	Children: <imageList> (optional image stack), <dub> (required — supplies
	audio + subtitle), <effects> (optional). The image is stretched to the
	subtitle's duration; a black 1920x1080 frame is used when no image given.

	Returns ((anchor, iclip, aclip, tclip, efts, bgm, ICLIP_SPX), stamp)
	where stamp is the advanced audio sample cursor.
	"""
	assert node.tag == "iclip"
	iclip, aclip, tclip, efts = None, None, None, None
	for cnode in node:
		if cnode.tag == "imageList":
			iclip = parse_imageList(cnode, only_check)
		elif cnode.tag == "dub":
			aclip, tclip, stamp = parse_dub(cnode, audio, stamp, afps, only_check)
		elif cnode.tag == "effects":
			efts = parse_effects(cnode)
		else:
			raise Exception(f"parse_iclip#->{cnode.tag} tag error")
	# In only_check mode parse_dub returns counts, which are truthy when non-zero.
	assert tclip and aclip
	if not only_check:
		# FIX: identity comparison — `iclip == None` relied on clip __eq__; use `is`.
		if iclip is None:
			iclip = get_blankvideo(tclip.duration, (1920,1080))
		else:
			iclip = iclip.set_duration(tclip.duration)
	anchor = node.attrib['anchor']
	bgm = get_para(node.attrib, 'bgm', bgm)
	return (anchor,iclip,aclip,tclip,efts,bgm,ICLIP_SPX),stamp

# return (vclips,tclips,spxs)
def parse_subVclips(node, video, section, speedx=10, aside="", only_check=False):
	"""Split frame range *section* of *video* into sub-clips per <subVclip> children.

	Each <subVclip> has para=(start,end) in FRAME indices (divided by video.fps
	below); start==end means a freeze-frame of `dur` seconds. Gaps between
	declared sub-clips (and after the last one) are emitted as filler clips at
	the scene-level *speedx*; a speedx/spx of 0 cuts that material out.

	Returns parallel lists (vclips, tclips, spxs): raw sub-clips, their
	subtitle clips (or None), and the speed factor to apply to each.
	"""
	vclips,tclips,spxs = [],[],[]
	vs,ve = section
	if vs > ve:
		raise Exception(f"parse_subVclips#section{section} section error")
	vfps = video.fps
	def check_aside(s,dur):
		# append the subtitle clip for this sub-clip (None when no aside text)
		if s:
			tclip = set_text(s, dur)
			tclips.append(tclip)
		else:
			tclips.append(None)
	for cnode in node:
		if cnode.tag == "subVclip":
			start,end = eval(cnode.attrib['para'])
			asd = get_para(cnode.attrib,'aside', aside)
			if start < end:
				spx = eval(cnode.attrib['speedx']) if 'speedx' in cnode.attrib else speedx
				# filler clip for the gap before this sub-clip (at scene speedx)
				if start > vs:
					if not only_check and speedx > 0:
						vclip = video.subclip(vs/vfps, start/vfps)
						vclips.append(vclip)
						check_aside(aside,vclip.duration)
						spxs.append(speedx)
					vs = start
				elif start == vs:
					pass
				else:
					raise Exception(f"parse_subVclips#section{section},para({start},{end}) section error")
				# the declared sub-clip itself (at its own spx; spx<=0 cuts it)
				if spx > 0:
					if not only_check:
						vclip = video.subclip(start/vfps, end/vfps)
						vclips.append(vclip)
						check_aside(asd,vclip.duration)
						spxs.append(spx)
				vs = end
			# freeze-frame: start == end, held for `dur` seconds at speed 1
			elif start == end:
				dur = eval(get_para(cnode.attrib,'dur','1'))
				if not only_check and speedx > 0:
					iclip = video.to_ImageClip(start/vfps).set_duration(dur)
					vclips.append(iclip)
					check_aside(asd,dur)
					spxs.append(1)
				vs = end
	# trailing filler after the last declared sub-clip
	if vs < ve and speedx > 0:
		if not only_check:
			vclip = video.subclip(vs/vfps,ve/vfps)
			vclips.append(vclip)
			check_aside(aside,vclip.duration)
			spxs.append(speedx)
	return vclips, tclips, spxs
	

# return ([anchor,vclip,aclip,tclip,efts,bgm,used_spx],para,stamp)
def parse_vclip(node, video, audio, section, speedx=10, aside="", stamp=0, afps=44100, bgm="", post_vclip=False, only_check=False):
	"""Parse one <vclip> node covering para=(start,end) frames inside *section*.

	Emits up to three scene entries into the returned list:
	  1. a "pre" filler clip for [vs, start) at the scene *speedx*;
	  2. the main clip — a freeze-frame when start==end, otherwise the
	     [start, end) range, optionally subdivided by <subVclips> and
	     re-timed so any <dub> audio fits;
	  3. (only when post_vclip) a "post" filler for [end, ve).
	A speedx/spx of 0 cuts the corresponding material entirely.

	Returns (res_list, (vs, ve), stamp): the entries, the remaining section
	cursor for the next sibling <vclip>, and the advanced audio sample cursor.
	"""
	assert node.tag == "vclip"
	vs, ve = section
	vfps = video.fps
	start,end = eval(node.attrib['para'])
	spx = eval(node.attrib['speedx']) if 'speedx' in node.attrib else speedx
	asd = node.attrib['aside'] if 'aside' in node.attrib else aside
	bgm = get_para(node.attrib, 'bgm', bgm)
	try:
		assert start >= vs and start <= end and end <= ve
	except:
		raise Exception(f"parse_vclip#section({vs},{ve}),para({start},{end}) para error")
	res_list = []
	
	if start > vs and speedx > 0:
		# pre filler clip [vs, start) at the scene-level speedx, ducked audio
		if not only_check:
			vclip = mySpeedx(video.subclip(vs/vfps, start/vfps),speedx)
			vclip.audio = apply_volumex(vclip.audio, speedx)
			if aside:
				tclip = set_text(aside, vclip.duration)
			else:
				tclip = None
			anchor = f"({vs},{start},{speedx*1.0},{aside})"
			res_list.append((anchor,vclip,None,tclip,None,bgm,speedx))
		vs = start
		
	# NOTE(review): truthiness of an ET Element (falsy when it has no children)
	# is deprecated; these find() results are probably meant as `is not None`.
	dub_node = node.find('dub')
	if dub_node:
		aclip, tclip, stamp = parse_dub(node.find('dub'), audio, stamp, afps, only_check)
	else:
		aclip, tclip = None, None
	effects_node = node.find('effects')
	if effects_node:
		efts = parse_effects(effects_node)
	else:
		efts = None
	
	if start == end:
		# freeze-frame: hold this frame for max(dur, dub length) seconds
		dur = eval(get_para(node.attrib,'dur','1'))
		if not only_check and dur > 0:
			iclip = video.to_ImageClip(start/vfps)
			if aclip and tclip:
				if aclip.duration >= dur:
					iclip = iclip.set_duration(aclip.duration)
				else:
					iclip = iclip.set_duration(dur)
					if asd:
						# pad the subtitle track with the aside text after the dub ends
						_tclip = set_text(asd, dur - tclip.duration)
						tclip = concatenate_videoclips([tclip, _tclip])
			else:
				iclip = iclip.set_duration(dur)
		else:
			iclip = None
		anchor = f"({start},{end},{dur*1.0},{asd})"
		res_list.append((anchor,iclip,aclip,tclip,efts,bgm,ICLIP_SPX))
		vs = start
	elif start < end:
		subVclips_node = node.find('subVclips')
		if subVclips_node:
			_vclips, _tclips, _spxs = parse_subVclips(subVclips_node, video, (start,end), spx, asd, only_check)
		else:
			_vclips, _tclips, _spxs = [],[],[]
		if spx > 0 or len(_vclips) > 0:
			# main clip for [start, end)
			if not only_check:
				if len(_vclips) == 0:
					# no subdivision: treat the whole range as one sub-clip
					vclip = video.subclip(start/vfps, end/vfps)
					_vclips = [vclip]
					if asd:
						_tclips = [set_text(asd,vclip.duration)]
					else:
						_tclips = [None]
					_spxs = [spx]
			used_spx = None
			if not only_check:
				# org_dur: raw duration; t_dur: duration after per-sub-clip speedx
				org_dur = sum(map(lambda v:v.duration,_vclips))
				t_dur = sum(map(lambda vx:(vx[0].duration/vx[1]),zip(_vclips,_spxs)))
				# re-speed one (clip, spx) pair and duck its own audio
				_complex_fn = lambda vx:(t:=mySpeedx(vx[0],vx[1])).set_audio(apply_volumex(t.audio,vx[1]))
				if aclip and tclip:
					if t_dur > tclip.duration:
						# video outlasts the dub: keep requested speeds, then
						# overlay the dub subtitle on the first sub-clips and
						# the aside subtitles on the rest
						_vclips = list(map(_complex_fn,zip(_vclips,_spxs)))
						_tclips = list(map(lambda tv:tv[0].set_duration(tv[1].duration) if tv[0] else tv[0],zip(_tclips,_vclips)))
						used_spx = org_dur / t_dur
						# find the sub-clip where the dub subtitle ends
						cur_dur = 0
						split_i = 0
						for i,v in enumerate(_vclips):
							cur_dur += v.duration
							if cur_dur > tclip.duration:
								split_i = i + 1
								break
						_pre_vclip = concatenate_videoclips(_vclips[0:split_i])
						if _tclips[split_i-1]:
							_tclip = _tclips[split_i-1].set_duration(_pre_vclip.duration-tclip.duration)
							_pre_tclip = concatenate_videoclips([tclip,_tclip])
						else:
							_pre_tclip = tclip
						_pre_vtclip = CompositeVideoClip([_pre_vclip, set_tclip_pos(_pre_tclip)])
						if split_i == len(_vclips):
							vtclip = _pre_vtclip
						else:
							_post_vtclips = list(map(lambda vt:(CompositeVideoClip([vt[0],set_tclip_pos(vt[1])]) if vt[1] else vt[0]), zip(_vclips[split_i:],_tclips[split_i:])))
							_post_vtclip = concatenate_videoclips(_post_vtclips)
							vtclip = concatenate_videoclips([_pre_vtclip,_post_vtclip])
					else:
						# dub outlasts the video: scale every sub-clip's speed so
						# the total video duration matches the dub exactly
						used_spx = org_dur / tclip.duration
						_spxs2 = list(map(lambda x:x*t_dur/tclip.duration,_spxs))
						_vclips2 = list(map(_complex_fn,zip(_vclips,_spxs2)))
						if (t:=sum(map(lambda v:v.duration,_vclips2))) != tclip.duration:
							# absorb rounding error into the last sub-clip
							_vclips2[-1] = mySpeedx(_vclips2[-1], final_duration=_vclips2[-1].duration+tclip.duration-t)
						vclip = concatenate_videoclips(_vclips2)
						vtclip = CompositeVideoClip([vclip,set_tclip_pos(tclip)])
				else:
					# no dub: just re-speed each sub-clip and overlay its aside text
					used_spx = org_dur / t_dur
					_vclips = list(map(_complex_fn,zip(_vclips,_spxs)))
					_tclips = list(map(lambda tv:tv[0].set_duration(tv[1].duration) if tv[0] else tv[0],zip(_tclips,_vclips)))
					_vtclips = list(map(lambda vt:(CompositeVideoClip([vt[0],set_tclip_pos(vt[1])]) if vt[1] else vt[0]), zip(_vclips,_tclips)))
					# breakpoint()
					vtclip = concatenate_videoclips(_vtclips)
			else:
				vtclip,aclip = None,None
			anchor = f"({start},{end},{spx*1.0},{asd})"
			res_list.append((anchor,vtclip,aclip,None,efts,bgm,used_spx))
		else:
			# spx == 0 and no sub-clips: this range is cut out entirely
			pass
		vs = end
	if post_vclip and end < ve and speedx > 0:
		# trailing filler [end, ve) — only for the last <vclip> of the scene
		if not only_check:
			vclip = mySpeedx(video.subclip(end/vfps, ve/vfps), speedx)
			vclip.audio = apply_volumex(vclip.audio,speedx)
			if aside:
				tclip = set_text(aside, vclip.duration)
			else:
				tclip = None
		else:
			vclip = tclip = None
		anchor = f"({end},{ve},{speedx*1.0},{aside})"
		res_list.append((anchor,vclip,None,tclip,efts,bgm,speedx))
		vs = ve
	
	return res_list, (vs,ve), stamp

def apply_effects(clip, efts):
	"""Apply a parsed effects list (from parse_effects) to *clip*.

	Supported effect types: "fadein" and "fadeout", each with a duration
	parameter. Unknown types are silently skipped. Returns the (possibly
	unchanged) clip; a falsy *efts* is a no-op.
	"""
	if not efts:
		return clip
	for eft_type, eft_para in efts:
		if eft_type == "fadein":
			clip = clip.fadein(eft_para)
		elif eft_type == "fadeout":
			clip = clip.fadeout(eft_para)
	return clip

def parse_effects(node):
	"""Parse an <effects> node into a list of (type, para) tuples.

	Each <effect> child contributes (attrib['type'], eval(attrib['para'])).
	NOTE: eval() on attribute text — script XML files are trusted input here.
	Raises on any non-<effect> child.
	"""
	assert node.tag == "effects"
	efts = []
	for cnode in node:
		if cnode.tag == "effect":
			type, para = cnode.attrib['type'], eval(cnode.attrib['para'])
			efts.append((type,para))
		else:
			# FIX: error message said "pasre_effects"
			raise Exception(f"parse_effects#{cnode.tag} tag error")
	return efts

# return (aclip, tclip, stamp)
def parse_dub(node, audio, stamp=0, afps=44100, only_check=False):
	"""Parse a <dub> node into (voice audio, subtitle clip, new sample cursor).

	Each <aclip> child's para selects a slice of *audio* in SAMPLE indices:
	  - (start, end): an explicit slice; cursor moves to end;
	  - a number: slice from the current cursor up to it; cursor moves there;
	  - absent/None: `dur` seconds of silence (cursor unchanged).
	The element text becomes the subtitle for that slice.
	With only_check=True, returns (#aclips, #tclips, stamp) instead of clips.

	NOTE(review): cnode.text.strip() raises if an <aclip> element has no
	text at all — presumably every aclip is expected to carry a caption.
	"""
	assert node.tag == "dub"
	aclips,tclips = [],[]
	if audio:
		# prefer the real source sample rate over the passed-in default
		afps = audio.fps
	for cnode in node:
		if cnode.tag == 'aclip':
			para = eval(get_para(cnode.attrib,'para','None'))
			aclip, tclip = None, None
			if type(para) == tuple and len(para) == 2:
				# explicit (start, end) sample range
				assert para[0] < para[1]
				start, end = para
				if not only_check:
					aclip = audio.subclip(start/afps, end/afps)
					tclip = set_text(cnode.text.strip(), aclip.duration)
				stamp = end
			elif isinstance(para, int) or isinstance(para, float):
				if stamp >= para:
					raise Exception(f"parse_dub# stamp={stamp} !< para={para} error")
				# para is the end sample; start from the running cursor
				if not only_check:
					aclip = audio.subclip(stamp/afps, para/afps)
					tclip = set_text(cnode.text.strip(), aclip.duration)
				stamp = para
			elif para == None:
				# silent filler of `dur` seconds; cursor does not move
				try:
					dur = eval(cnode.attrib['dur'])
				except:
					raise Exception(f"parse_dub#aclip({cnode.text.strip()}) error")
				if not only_check:
					aclip = get_blankaudio(dur, afps)
					tclip = set_text(cnode.text.strip(), dur)
			else:
				raise Exception(f"parse_dub#->{cnode.tag} para error")
			aclips.append(aclip)
			if tclip:
				tclips.append(tclip)
		else:
			raise Exception(f"parse_dub#->{cnode.tag} tag error")
	return (concatenate_audioclips(aclips), concatenate_videoclips(tclips) if len(tclips) else None, stamp) if not only_check else (len(aclips),len(tclips),stamp)

audio_buf = {'None':None}	# audio cache; the 'None' key maps "no source" to None
def get_blankaudio(dur=1, afps=44100):
	"""Return *dur* seconds of stereo silence at *afps* Hz.

	One second of silence is cached per sample rate and tiled to the
	requested duration with audio_loop().
	"""
	key = 'blank_' + str(afps)
	if key not in audio_buf:
		# NOTE: AudioArrayClip is already imported at module top; the old
		# redundant function-local re-import was removed.
		audio_buf[key] = AudioArrayClip(np.zeros((afps, 2)), fps=afps)
	return audio_buf[key].audio_loop(duration=dur)

video_buf = {'None':None}	# video cache; the 'None' key maps "no source" to None
def get_blankvideo(dur=1,size=(50,50)):
	"""Return a black ImageClip of *size* (w, h) lasting *dur* seconds.

	FIX: the previous setdefault() call both built a throwaway clip on every
	cache hit and baked the FIRST call's duration into the cached clip, so
	later calls with a different *dur* got the stale duration. The frame is
	now cached duration-less and stamped with the requested duration on
	the way out.
	"""
	key = 'blank_' + str(size)
	if key not in video_buf:
		video_buf[key] = ImageClip(np.zeros((size[1],size[0],3),dtype=np.uint8))
	return video_buf[key].set_duration(dur)

#scene_map{mark:[anchor,vclip,aclip,tclip,efts,bgm,speedx],...}
scene_map = {}
def parse_scene(node, video, audio, stamp=0, afps=44100, bgm="", only_check=False):
	"""Parse a <scene> node and store its entries in scene_map[mark].

	type='iclip': a sequence of dubbed image slides.
	type='vclip': cut/re-speed the frame range `para` of *video*.
	Returns the advanced audio sample cursor. Raises for an empty or
	duplicate mark and for unknown scene types or child tags.
	"""
	assert node.tag == "scene"
	type, mark = node.attrib['type'], node.attrib['mark']
	if mark == '':
		raise Exception("parse_scene# mark == '' error")
	elif mark in scene_map:
		raise Exception(f"parse_scene#mark({mark}) repeat error")
	afps = audio.fps if audio else afps
	aside = get_para(node.attrib, 'aside', '')
	bgm = get_para(node.attrib, 'bgm', bgm)
	# (anchor,vclip/iclip,aclip,speedx)
	res_list = []
	if type == 'iclip':
		for cnode in node:
			if cnode.tag == "iclip":
				res, stamp = parse_iclip(cnode, audio, stamp, afps, bgm, only_check)
				res_list.append(res)
			else:
				raise Exception(f"parse_scene#{type}->{cnode.tag} tag error")
	elif type == 'vclip':
		para,speedx = eval(node.attrib['para']), eval(get_para(node.attrib,'speedx','10'))
		assert para[1] >= para[0]
		vnodes = node.findall('vclip')
		for i,cnode in enumerate(vnodes):
			# the last <vclip> (i+1==len) also emits the trailing post-filler
			_res_list,para,stamp = parse_vclip(cnode, video, audio, para, speedx, aside, stamp, afps, bgm, i+1==len(vnodes), only_check)
			res_list.extend(_res_list)
	else:
		# FIX: this branch referenced `cnode`, which is unbound when the scene
		# type is unknown (NameError masked the real message); report the type.
		raise Exception(f"parse_scene#{type} type error")
	scene_map[mark] = res_list
	return stamp

def parse_part(node, afps, bgm="", only_check=False):
	"""Parse a <part>: open its video/audio sources (cached) and parse child <scene>s.

	vsrc/asrc values of "" or "None" mean no source. `stamp` (initial audio
	sample cursor, default 0) is threaded through every scene.
	"""
	assert node.tag == "part"
	vsrc, asrc = get_para(node.attrib,'vsrc','None'), get_para(node.attrib,'asrc','None')
	stamp = eval(get_para(node.attrib,'stamp','0'))
	bgm = get_para(node.attrib,'bgm',bgm)
	if vsrc == "" or vsrc == "None":
		video_buf[vsrc] = video = None
	else:
		video_buf[vsrc] = video = VideoFileClip(vsrc) if vsrc not in video_buf else video_buf[vsrc]
	if asrc == "" or asrc == "None":
		# FIX: the audio cache was keyed by vsrc here (copy-paste error)
		audio_buf[asrc] = audio = None
	else:
		# FIX: store under asrc (was vsrc) so the `asrc not in audio_buf`
		# lookup above can ever hit the cache
		audio_buf[asrc] = audio = AudioFileClip(asrc) if asrc not in audio_buf else audio_buf[asrc]
	
	for cnode in node:
		if cnode.tag == "scene":
			stamp = parse_scene(cnode, video, audio, stamp, afps, bgm, only_check)
		else:
			raise Exception(f"parse_part#{cnode.tag} tag error")

def parse_va(node,flag_write=False,only_check=False):
	"""Parse one <va> node: build every scene clip, optionally render and concat.

	Per scene-entry mp4s are written under script_dir/<output>/<mark>/, then
	concatenated per mark and finally into <output>.mp4 via ffmpeg's concat
	demuxer. With flag_write=False nothing is rendered; with only_check=True
	only the XML structure is validated. Skips entirely (returns None) when
	a directory/file named *output* already exists.

	Returns the path of the final output file.

	NOTE(review): concat list entries use backslash separators — this is
	Windows-specific; confirm before running elsewhere.
	"""
	assert node.tag == "va"
	bitrate, output, fps, afps = node.attrib['bitrate'], node.attrib['output'], eval(node.attrib['fps']), eval(get_para(node.attrib,'afps','44100'))
	bgm = get_para(node.attrib,'bgm','')
	if os.path.exists(output):
		print(f"{output} exists, skip parsing")
		return None
	
	# populate scene_map from every <part>
	for cnode in node:
		if cnode.tag == "part":
			parse_part(cnode,afps,bgm,only_check)
		else:
			raise Exception(f"parse_va#{cnode.tag} tag error")
	
	# breakpoint()
	if not only_check:
		# print the total duration of all scene entries
		print(f"{output} 时长:{sum(map(lambda x:sum(map(lambda y:y[1].duration,x)), scene_map.values())):4}s")
	output_file = os.path.join(script_dir, output+'.mp4')
	
	cur_dir = os.getcwd()
	output_dir = os.path.join(script_dir, output)
	flag_output_update = False
	concat_text = ''
	for mark in scene_map:
		flag_mark_update = False
		concat_text += f"file '{output}\\{mark}.mp4'\n"
		mark_dir = os.path.join(output_dir, mark)
		# mark_file = os.path.join(output_dir, mark+'.mp4')
		if not os.path.exists(mark_dir):
			os.makedirs(mark_dir)
		mark_text = ""
		for anchor,vclip,aclip,tclip,efts,bgm,spx in scene_map[mark]:
			mark_text += f"file '{mark}\\{anchor}.mp4'\n"
			_filename = os.path.join(mark_dir,f"{anchor}.mp4")
			if not only_check:
				# compose video + positioned subtitle, apply effects, then mix
				# the clip's own (ducked) audio with the dub audio
				vclip_a = vclip.audio
				vclip_v = vclip.set_audio(None)
				vtclip = CompositeVideoClip([vclip_v, set_tclip_pos(tclip)]) if tclip else vclip_v
				vtclip = apply_effects(vtclip, efts)
				_vaclips = list(filter(None,[vclip_a,aclip]))
				if len(_vaclips):
					vaclip = CompositeAudioClip(_vaclips)
				else:
					vaclip = None
				fclip = vtclip.set_audio(vaclip).set_fps(fps)
				# existing files are treated as up to date and never re-rendered
				if flag_write and not os.path.exists(_filename):
					fclip.write_videofile(_filename, bitrate=bitrate, threads=THREADS, logger=LOGGER)
					flag_mark_update = True
					flag_output_update = True
		if flag_mark_update and not only_check:
			# stream-copy concat of this mark's entry files
			with open(f'{output_dir}\\{mark}.txt', 'w', encoding="utf-8") as f:
				f.write(mark_text)
			os.chdir(output_dir)
			os.system(f"ffmpeg -f concat -safe 0 -i {mark}.txt -c copy -y {mark}.mp4")
			os.chdir(cur_dir)
	
	# final concat of all mark files into <output>.mp4
	if (not os.path.exists(output_file) or flag_output_update) and concat_text and not only_check and flag_write:
		os.chdir(script_dir)
		with open(f'{output}.txt','w',encoding="utf-8") as f:
			f.write(concat_text)
		os.system(f"ffmpeg -f concat -safe 0 -i {output}.txt -c copy -y {output}.mp4")
		#shutil.move(output+".mp4", "../")
		os.chdir(cur_dir)
	return output_file

script_dir = "脚本工作区"
bgm_map = {}
THREADS = 4
LOGGER = 'bar'
def parse_script(root):
	"""Parse the top-level <script> node and write list.txt with all outputs.

	Root attributes: run (render files when True) and onlyCheck (validate
	only). Each <va> child produces one output file path; scene_map is
	cleared between <va> nodes so marks may repeat across them.
	"""
	assert root.tag == "script"
	run = eval(get_para(root.attrib, 'run', 'False'))
	only_check = eval(get_para(root.attrib, 'onlyCheck', 'False'))
	entries = []
	for node in root:
		if node.tag == "va":
			entries.append(f"file '{parse_va(node, run, only_check)}'\n")
			scene_map.clear()
		elif node.tag == "bgmList":
			# bgm support is still under design; the node is accepted but ignored
			pass
		else:
			raise Exception(f"parse_script#{node.tag} tag error")
	with open('list.txt','w',encoding='utf-8') as f:
		f.write(''.join(entries))

if __name__=='__main__':
	# `python <file>.py test` reads test.xml from the test workspace;
	# otherwise the script XML is named after the current directory.
	args = sys.argv
	if len(args) == 2 and args[1] == 'test':
		fn = 'test.xml'
		script_dir = "脚本测试区"
	else:
		fn = os.path.basename(os.path.abspath('.'))+'.xml'
	with open(fn, encoding='utf-8') as f:
		text = f.read()
	root = ET.fromstring(text)
	parse_script(root)
	
	
	
	