import os
import asyncio
import aiohttp
import aiofiles
import threading
import time
import datetime
import requests
import re
import shutil
from aiohttp import ClientSession
from aiohttp import TCPConnector
from config import appconfig
from logger import logger
from urllib.parse import urlsplit
from pathlib import Path
from shutil import copyfile
from Crypto.Cipher import AES
from utils import ProStatus
# from views.index import ProStatus
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from views.fastdownload import new_download
import tornado.ioloop
from tornado.ioloop import IOLoop
from multiprocessing import Process
# from multiprocessing.dummy import Process
from tornado.queues import Queue
from multiprocessing import Queue as mQueue

# Shared Tornado HTTP client and default request headers for downloads.
http = AsyncHTTPClient()
headers = HTTPHeaders()
# Impersonate a desktop Chrome/Edge browser; some servers reject unknown agents.
headers.update({"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"})

class Download(object):
	loop = IOLoop.current()

	def __init__(self,db,cacle_dir=os.path.expanduser('~/AppData/Roaming/pyDownload'),max_tries=5,max_tasks=3):
		"""Create the download manager.

		Args:
			db: database wrapper exposing ``get``/``query``/``execute`` (project type).
			cacle_dir: cache directory where downloaded files are stored.
			max_tries: maximum retry count when fetching index files.
			max_tasks: reserved concurrency limit (``task_num`` below is what is used).
		"""
		self.db = db
		self.cacle_dir = cacle_dir.replace('\\', '/')	# normalized cache directory (forward slashes)
		self.max_tasks = max_tasks
		self.max_tries = max_tries
		# Private asyncio loop, driven forever by a background thread (start_loop).
		self.new_loop = asyncio.new_event_loop()
		# NOTE(review): verify_ssl is deprecated in newer aiohttp (use ssl=False);
		# kept as-is for compatibility with the pinned aiohttp version.
		self.session = ClientSession(connector=TCPConnector(verify_ssl=False,loop=self.new_loop),loop=self.new_loop)
		self.headers = {"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"}

		self.all_dt = {}		# per-section list of chunk descriptors (pending + finished)
		self.proc_queue = mQueue()	# results reported back by worker processes
		self.rs = requests.session()
		self.semaphore = asyncio.Semaphore(500)		 # cap concurrency at 500 (Windows needs a limit)

		# Download progress bookkeeping.
		self.dt_progress = {}	# per-section progress/status dict
		self.task_num = 3 		# download at most 3 sections at the same time
		self.task_dist = {}		# per-section {'status', 'queue'} used to control workers

		# Temporary storage for playback files.
		self.play_list = {}	# {section: root_path, ...}
		# Run the private event loop in a daemon thread so it does not block
		# interpreter shutdown.  Thread.setDaemon() is deprecated (removed in
		# Python 3.13); the attribute assignment is the supported spelling.
		t = threading.Thread(target=self.start_loop,args=(self.new_loop,))
		t.daemon = True
		t.start()

		# Restore local cache state synchronously before returning (join blocks).
		t2 = threading.Thread(target=self.getAllLocalVideo,args=())
		t2.start()
		t2.join()

	async def check(self):
		"""Poll the multiprocessing result queue forever and fold worker
		progress back into ``dt_progress`` / ``all_dt``.

		Worker messages are dicts with a ``flag`` field:
		  True   -> one chunk finished (carries its byte ``size``)
		  'stop' -> the worker paused; free the scheduler slot
		  'end'  -> whole section finished; persist to DB, optionally merge
		"""
		while True:
			if not self.proc_queue.empty():
				try:
					data = self.proc_queue.get_nowait()
					if isinstance(data,dict):
						section = data.get('section')
						flag = data.get('flag')
						tsIndex = data.get('index',0)
						if flag==True:	# one chunk downloaded successfully
							size = data.get('size',0)
							if size>0:
								self.dt_progress[section]['done_length'] += size
								self.dt_progress[section]['done'] += 1
							self.all_dt[section][tsIndex]['status'] = 2  # mark this chunk as finished
						elif flag=='stop':
							logger.info(flag)
							self.dt_progress[section]['status'] = 9
							self.dt_progress[section]['msg'] = '已暂停'
							self.task_dist[section]['status'] = 0
							self.getTasks()	# slot freed: let the next queued section start
						elif flag=='end':	# whole section finished downloading
							done = self.dt_progress[section]['done']
							total = self.dt_progress[section]['total']
							content_length = self.dt_progress[section]['content_length']
							done_length = self.dt_progress[section]['done_length']
							indexname = self.dt_progress[section]['indexname']

							section_one = self.db.get("select * from section where id='%s'" % section)
							self.dt_progress[section]['content_length'] = done_length
							section_one.success = 1
							section_one.done = done
							section_one.content_length = done_length
							section_one.done_length = done_length
							self.dt_progress[section]['status'] = 10
							self.dt_progress[section]['msg'] = '下载完成'
							# merge flag 1/3 means the chunks must be stitched into one file
							if self.dt_progress[section]['merge'] in (1, 3):
								self.dt_progress[section]['status'] = 11
								self.dt_progress[section]['msg'] = '合并文件...'
								name = await self.convert_m3u(section)
								section_one.success = 2
								self.dt_progress[section]['status'] = 12
								self.dt_progress[section]['msg'] = '合并完成'
								self.dt_progress[section]['indexname'] = name
								indexname = name

							sqlstr = 'update section set success={success},indexname="{indexname}",done={done},content_length={content_length},done_length={done_length} \
									where id="{id}"'.format(success=section_one.success, indexname=indexname,
															done=section_one.done,
															content_length=section_one.content_length,
															done_length=section_one.done_length, id=section)
							self.db.execute(sqlstr)
							ProStatus().triggerUpdateData(self.getStatus(id=section))
							self.task_dist[section]['status']=0
							self.getTasks()
							# Auto-import the finished download into the movie library.
							if self.dt_progress[section]['type'] in('m3u8','mp4','mkv'):
								self.appendMovie(section,0)
								logger.info(flag)
						elif flag=='total':
							pass
				except Exception as e:
					logger.error(e)
			await asyncio.sleep(0.08)

	def send_subprocess(self,section):
		"""Post a pause request onto the control queue of *section*'s worker."""
		logger.info('发送暂停信号 %s'%section)
		control_queue = self.task_dist[section]['queue']
		control_queue.put({'code':1,'status':1})	# code 1 / status 1 => pause


	def start_loop(self,loop):
		'''Install *loop* as this worker thread's event loop, schedule the
		result-queue watcher (``check``) on the main IOLoop, then block forever.'''
		# threading.currentThread() is deprecated (removed in Python 3.13);
		# current_thread() is the identical, supported spelling.
		logger.info('Turn on child thread tid:{}'.format(threading.current_thread().ident))
		asyncio.set_event_loop(loop)
		Download.loop.spawn_callback(self.check)
		loop.run_forever()

	async def stop_loop(self):
		'''Close the shared aiohttp session and stop the worker thread's loop.'''
		# threading.currentThread() is deprecated (removed in Python 3.13);
		# current_thread() is the identical, supported spelling.
		logger.info('Turn off child thread tid:{}'.format(threading.current_thread().ident))
		await self.session.close()
		self.new_loop.stop()

	def getAllLocalVideo(self,section=None):
		"""Restore in-memory download state from the DB / local cache.

		Status legend (dt_progress['status']):
		0 no index / 1 downloading index / 2 index downloaded / 3 index failed /
		4 parsing index / 5 parse failed / 6 queued / 7 downloading / 8 pausing /
		9 paused / 10 downloaded / 11 merging / 12 merged / 13 deleting /
		14 deleted / 15 resume unsupported / 16 re-download

		Args:
			section: restore a single section id, or every row when None.
		"""
		if section:
			# BUG FIX: section ids are strings (see addTask: str(round(time()*1000))),
			# so the old "id=%d" raised TypeError; quote like every other query here.
			section_one = self.db.get("select * from section where id='%s'" % section)
			if section_one:
				self.getLocalVideo(section_one)
		else:
			section_data = self.db.query("select * from section")
			for section_one in section_data:
				self.getLocalVideo(section_one)

	def getLocalVideo(self,section_one):
		"""Rebuild the in-memory progress entry for one DB row and return the
		resulting status code (see getAllLocalVideo for the legend)."""
		section = section_one.id
		download_status = section_one.success
		merge = section_one.merge
		name = section_one.name
		author = section_one.author
		indexname = section_one.indexname
		ftype = section_one.ftype
		url = section_one.url
		cacle_path = section_one.cacle_path
		root_path = section_one.root_path
		total = section_one.total
		done = section_one.done
		content_length = section_one.content_length
		done_length = section_one.done_length
		dt = []
		self.all_dt[section] = dt
		self.dt_progress[section] = {'id':section,'indexname':indexname,'play_index':'','name':name,'author':author,'url':url,'root':root_path,
			'cacle_path':section_one.cacle_path,'type':ftype,'total':total,'done':done,'fail':set(),'content_length':content_length,
			'done_length':done_length,'merge':merge,'status':0,'msg':'准备中...'}
		# Absolute cache directory for this section.
		cacle_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
		logger.info("getLocalVideo for section:%s,status=%s"%(section,download_status))
		if download_status == 1:	# already fully downloaded
			self.dt_progress[section]['content_length'] = content_length
			self.dt_progress[section]['done_length'] = done_length
			self.dt_progress[section]['status'] = 10
			self.dt_progress[section]['msg'] = '下载完成'
			logger.info("download success")
			return 10
		if download_status == 2:	# already downloaded and merged
			self.dt_progress[section]['content_length'] = content_length
			self.dt_progress[section]['done_length'] = done_length
			self.dt_progress[section]['status'] = 12
			self.dt_progress[section]['msg'] = '合并完成'
			logger.info("merge success")
			return 12
		if not os.path.exists(cacle_path):	# cache directory was deleted
			self.dt_progress[section]['status'] = 14
			self.dt_progress[section]['msg'] = '已删除'
			logger.info("cacle_path is removed")
			return 14
		# Index file missing or implausibly small: treat as "no index".
		if (not os.path.exists(os.path.join(cacle_path,indexname))) or os.path.getsize(os.path.join(cacle_path,indexname))<1024:
			self.dt_progress[section]['status'] = 0
			self.dt_progress[section]['msg'] = "索引不存在"
			logger.info("index file is not exist")
			return 0
		# Re-parse the index against the cache and leave the section paused.
		with open(os.path.join(cacle_path,indexname),'r') as fd:
			data_ts = fd.readlines()
		dt,total,done,done_length = self.resolveData(section,data_ts,local=True)
		self.dt_progress[section]['total'] = total
		self.dt_progress[section]['done'] = done
		self.dt_progress[section]['done_length'] = done_length
		self.dt_progress[section]['status'] = 9
		self.dt_progress[section]['msg'] = '已暂停'
		self.task_dist[section]={'status':0,'queue':mQueue()}
		return 9

	def resolveData(self,section,data,ftype=None,local=False):
		"""Parse an index (m3u8 lines or a plain URL list) into chunk
		descriptors; the result also contains chunks that are already done.

		Args:
			section: section id whose dt_progress entry supplies type/paths/url.
			data: iterable of index lines.
			ftype: ignored -- immediately overwritten from dt_progress below.
				NOTE(review): kept only for signature compatibility; confirm
				no caller relies on it before removing.
			local: when True, check the cache and mark existing chunks done.

		Returns:
			(dt, total, done, done_length) where dt is the full chunk list.
		"""
		# Supports ts/jpg-style suffixed segments; suffix-less ones are unsupported.
		ftype = self.dt_progress[section]['type']
		save_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
		url = self.dt_progress[section]['url']
		parsed_result = urlsplit(url)
		scheme = parsed_result.scheme	# protocol
		netloc = parsed_result.netloc	# host
		netpath = os.path.split(parsed_result.path)[0]	# base path of the index URL

		dt = []
		txt = ''	# accumulates URLs of still-pending chunks (written to index.txt)
		done,done_length = 0,0
		startTime = 0.0
		endTime = 0.0
		index = 0
		if ftype != 'txt':
			flag = False	# set when duplicate file names had to be renamed
			temp_file_list = set()
			temp_data = ''	# copy of the index, rewritten when names are deduped
			for ts in data:
				durTime = 0
				ts = ts.replace('\n','')
				temp_data += ts+'\n'
				if ts == '':
					continue
				if ts.startswith('#'):
					if "URI=" in ts:	# encryption-key line (#EXT-X-KEY)
						key = re.compile('URI="(.*?)"')
						key = key.findall(ts)[0]
						key = urlsplit(key)
						scheme_key=key.scheme
						netloc_key = key.netloc
						path = os.path.split(key.path)[0]
						file = os.path.split(key.path)[1]
						query = key.query
						status = 0
						if local:	# check the cache
							if os.path.exists(os.path.join(save_path,file)):
								done +=1
								status = 2
						# Build the key's download URL.
						if not path:
							base_url = scheme+'://'+netloc+netpath
						elif scheme_key:
							base_url = scheme_key+'://'+netloc_key+path
						else:
							base_url = scheme+"://"+netloc+path
						dt.append({'index':index,'section':section,'ftype':ftype,'file_name':file,
							'cacle_path':save_path,'base_url':base_url+'/'+file,'query':query,
							'startTime':0,'endTime':0,'Amount':0,'status':status})
						index += 1
					if "EXTINF" in ts:	# segment duration in seconds
						durTime = re.compile(':(.*?),')
						durTime = durTime.findall(ts)[0]
						durTime = float(durTime)
					continue
				# Resolve the segment path.
				temp_ts = ts
				ts = urlsplit(ts)
				scheme_ts = ts.scheme
				netloc_ts = ts.netloc
				path_ts = ts.path
				path = os.path.split(path_ts)[0]
				file = os.path.split(path_ts)[1]
				# Deduplicate repeated file names by appending _1, _2, ...
				if not file in temp_file_list:
					temp_file_list.add(file)
					temp_name = file
				else:
					flag = True
					i=1
					while i:
						temp_name = file+'_'+str(i)
						if temp_name not in temp_file_list:
							temp_file_list.add(temp_name)
							temp_data = temp_data.replace(temp_ts,temp_ts+'_'+str(i))
							break
						i += 1
				query = ts.query
				status = 0
				startTime = endTime
				endTime += durTime
				if ftype=='m3u8':
					if local:	# check the cache (plausible segment > 10 KiB)
						if os.path.exists(os.path.join(save_path,file)) and  os.path.getsize(os.path.join(save_path,file)) > 10240:
							done +=1 # finished count +1
							done_length += os.path.getsize(os.path.join(save_path,file))
							status = 2
					# Build the segment's download URL.
					if not path:	# bare file name only
						base_url = scheme+'://'+netloc+netpath
					elif scheme_ts:	# already a full url
						base_url = scheme_ts+'://'+netloc_ts+ path
					else:   # relative path
						base_url = scheme+'://'+netloc+ path
					dt.append({'index':index,'section':section,'ftype':ftype,'file_name':temp_name,
						'cacle_path':save_path,'base_url':base_url+'/'+file,'query':query,
						'startTime':round(startTime,6),'endTime':round(endTime,6),'Amount':0,'status':status})
					index += 1
					if status==0:	# still pending: record in index.txt
						if query:
							txt += base_url+'/'+file+'?'+query+'\n'
						else:
							txt += base_url+'/'+file+'\n'
				if ftype=='mp4':
					# mp4 chunk names encode the byte range: <start>_<end>_<size>.ts
					start,end,size = os.path.splitext(file)[0].split('_')
					start = int(start)
					size = int(size)
					begin = int(start)
					if local:	# check the cache
						if os.path.exists(os.path.join(save_path,file)):
							done_size = os.path.getsize(os.path.join(save_path,file))
							# chunk exists but is corrupt (too big): re-download
							if done_size > int(size):
								os.remove(os.path.join(save_path,file))
							# chunk exists and is complete
							if done_size == int(size):
								done +=1 # finished count +1
								done_length += done_size
								status = 2
							# chunk exists but is partial: resume from done_size
							if done_size < int(size):
								done_length += done_size
								begin = start + done_size
					# 'end' may be empty (last chunk), so it is passed as a string.
					dt.append({'index':index,'section':section,'ftype':ftype,'file_name':file,
					'cacle_path':save_path,'base_url':url,'query':query,
					'start':start,'end':end,'begin':begin,'size':size,
					'startTime':0,'endTime':0,'Amount':0,'status':status})
					index += 1
			if flag:
				# Names were deduplicated: write the rewritten index, named after
				# the section id, next to the cached segments.
				with open(os.path.join(os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path']),section),'w') as f:
					f.write(temp_data)
		else:
			# Plain txt source: each non-empty line is a full file URL.
			for ts in data:
				ts = ts.replace('\n','')
				if ts == '':
					continue
				ts = urlsplit(ts)
				scheme_ts = ts.scheme
				netloc_ts = ts.netloc
				path_ts = ts.path
				path = os.path.split(path_ts)[0]
				file = os.path.split(path_ts)[1]
				query = ts.query
				status = 0
				startTime = 0
				endTime = 0
				if local:	# check the cache
					if os.path.exists(os.path.join(save_path,file)) and  os.path.getsize(os.path.join(save_path,file)) > 10240:
						done +=1 # finished count +1
						done_length += os.path.getsize(os.path.join(save_path,file))
						status = 2
				# Build the download URL.
				if not path:	# bare file name only
					base_url = scheme+'://'+netloc+netpath
				elif scheme_ts:	# already a full url
					base_url = scheme_ts+'://'+netloc_ts+ path
				else:   # relative path
					base_url = scheme+'://'+netloc+ path
				dt.append({'index':index,'section':section,'ftype':ftype,'file_name':file,
					'cacle_path':save_path,'base_url':base_url,'query':query,
					'startTime':round(startTime,6),'endTime':round(endTime,6),'Amount':0,'status':status})
				index += 1
				if status==0:
					if query:
						txt += base_url+'?'+query+'\n'
					else:
						txt += base_url+'\n'
		# Rewrite index.txt with the URLs of still-pending chunks.
		txt_path = os.path.join(save_path,'index.txt')
		if os.path.exists(txt_path):
			os.remove(txt_path)
		with open(txt_path,'w+') as f:
			f.write(txt)
		self.all_dt[section] = dt
		logger.info("%s: All：%s，Ready(Num)：%s，Ready(Size)：%s"%(section,len(dt),done,done_length))
		return dt,len(dt),done,done_length

	def getStatus(self,id=None,status=0):
		"""Build the status list that is pushed to the front end.

		Args:
			id: when given, return only that section's status entry.
			status: 0 all sections, 1 unfinished only, 2 finished only.

		Returns:
			list of plain (serializable) dicts, one per section.
		"""
		filmStatus = []
		def getdata(k,v):
			# Flatten one progress entry into a serializable dict and roll
			# 'pre_done_length' forward so clients can compute download speed.
			value = {}
			value['id'] = v['id']
			value['indexname'] = v['indexname']
			value['play_index'] = v['play_index']
			value['name'] = v['name']
			value['author'] = v['author']
			value['url'] = v['url']
			value['type'] = v['type']
			value['total'] = v['total']
			value['done'] = v['done']
			value['fail'] = len(v['fail'])
			value['content_length'] = v['content_length']
			value['done_length'] = v['done_length']
			value['merge'] = v['merge']
			value['status'] = v['status']
			value['msg'] = v['msg']
			if not 'pre_done_length' in self.dt_progress[k].keys():
				v['pre_done_length'] = 0
			value['pre_done_length'] = v['pre_done_length']
			self.dt_progress[k]['pre_done_length'] = self.dt_progress[k]['done_length']
			return value

		if id:
			v = self.dt_progress[id]
			data = getdata(id,v)
			filmStatus.append(data)
		else:
			for k,v in self.dt_progress.items():
				data = getdata(k,v)
				if status==1:
					if data['status'] not in (10,12,14):
						filmStatus.append(data)
				elif status==2:
					# BUG FIX: was `in(10.12)` -- a membership test against the
					# float 10.12, which raises TypeError.  "Finished" means
					# status 10 (downloaded) or 12 (merged).
					if data['status'] in (10, 12):
						filmStatus.append(data)
				else:
					filmStatus.append(data)
		return filmStatus

	def getUrl_data(self,url,name=None,data=None):
		"""Thin wrapper around getUrl for callers that supply raw index data."""
		return self.getUrl(url, name, data=data)

	def getUrl(self,url,name=None,path=None,data=None):
		"""Validate a new download request and create the task.

		Args:
			url: download link (http[s] URL, or a local txt file path).
			name: display name, optionally "title--author"; when None/empty it
				is derived from the URL's basename.
			path: optional target directory; falls back to the cache dir.
			data: optional raw m3u8 index content.

		Returns:
			(resp, message) tuple; resp is False on validation failure.
		"""
		url = url.replace('\\','/')
		url = url.strip()
		# BUG FIX: name defaults to None -- None.replace() raised AttributeError.
		name = (name or '').replace(' ','')
		if name=='':
			if url.startswith('http'):
				name = os.path.basename(url)
			else:
				logger.warn('获取不到文件名称')
				return False, '获取不到文件名称!'
			author = ''
		else:
			# "title--author" convention; extra '--' separators stay in the title.
			names = name.split('--')
			if len(names) == 1:
				name, author = name, ''
			elif len(names) == 2:
				name, author = names
			else:
				name, author = '--'.join(names[:-1]), names[-1]
		# BUG FIX: path defaults to None (getUrl_data never passes one) --
		# None.strip() raised AttributeError.
		path = path.strip() if path else None
		path = Path(path) if path else None
		cfg_films = []
		# Reject duplicate names.
		section_data = self.db.query("select * from section")
		for section_one in section_data:
			cfg_films.append(section_one.name)
		if name in cfg_films:
			logger.warn('文件名称已存在!')
			return False,'文件名称已存在!'
		section = str(round(time.time()*1000))	# millisecond timestamp as id
		if not name:
			name = section
		resp = self.addTask(section,url,name,author,path,data)
		return resp,'创建成功'

	def addTask(self,section,url,name,author,path=None,data=None):
		"""Register a new download: derive the file type from the URL, seed
		dt_progress, insert the DB row, run the matching producer and, when it
		succeeds, hand the section to the scheduler (getTasks).

		NOTE(review): this method has no explicit return, so callers (getUrl)
		always receive None -- confirm whether a status was intended.
		"""
		if url.startswith('http'):
			parsed_result = urlsplit(url)
			indexname = os.path.split(parsed_result.path)[1]
			(_indexname,ftype) = os.path.splitext(indexname)
			if ftype:
				ftype = ftype[1:]	# drop the leading dot
			else:
				ftype = ''
		else:
			# Non-http sources are local txt files listing one URL per line.
			indexname = url.split('/')[-1]
			ftype = 'txt'
		success = 0
		merge = 0
		total = 0
		done = 0
		content_length = 0
		done_length = 0
		fail = set()
		cacle_path = name
		root_path = path if path!=None else self.cacle_dir
		status = 0
		origin_indexname = indexname
		if ftype in('mp4',):
			# mp4 downloads get a generated m3u8 index and must be merged later.
			origin_indexname = indexname
			indexname = _indexname+'.m3u8'
			merge = 1
		self.dt_progress[section] = {'id':section,'origin_indexname':origin_indexname,'indexname':indexname,'play_index':'',
									 'name':name,'author':author,'url':url,'root':root_path,
			'cacle_path':cacle_path,'type':ftype,'total':total,'done':done,'fail':fail,'content_length':content_length,
			'done_length':done_length,'merge':merge,'status':status,'msg':'准备中...','data':data}
		# Persist the new row.
		addtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
		sqlstr = 'insert into section (id,success,merge,total,done,url,indexname,ftype,name,author,content_length,done_length,cacle_path,root_path,addtime) \
			values ("%s",%d,%d,%d,%d,"%s","%s","%s","%s","%s",%d,%d,"%s","%s","%s");' % (section,success,merge,total,done,url,indexname,ftype,name,author,content_length,done_length,cacle_path,root_path,addtime)
		self.db.execute(sqlstr)

		# Push the new task to the front end.
		ProStatus().triggerUpdateData(self.getStatus(id=section))

		# Dispatch to the producer that builds the chunk list for this type.
		if ftype == 'm3u8':
			status = self.producer_m3u(section)
		elif ftype == 'mp4':
			status = self.producer_mp4(section)
		elif ftype in ('jpg','png','bmp'):
			status = self.producer_jpg(section)
		elif ftype == '':
			# No extension: assume an m3u8 stream.
			status = self.producer_m3u(section)
		elif ftype == 'txt':
			status = self.producer_txt(section)
		else:
			logger.info('Unsupported file type:%s'%ftype)
			status = 0
		if status:
			self.task_dist[section]={'status':0,'queue':mQueue()}
			self.getTasks(section)

	def getTasks(self,section="0"):
		"""Scheduler: queue *section* and start workers until ``task_num``
		sections are downloading.

		task_dist status: 0 idle/paused, 1 queued, 2 downloading.
		"""
		index = 0
		task_done = 0	# number of sections currently downloading
		for section_one in self.task_dist:
			status = self.task_dist[section_one].get('status')
			if section_one==section:
				self.task_dist[section_one]['status']=1
				self.dt_progress[section]['status'] = 6
				self.dt_progress[section]['msg'] = "等待中......"
				ProStatus().triggerUpdateData(self.getStatus(id=section))
				if status == 2: 	# it was already downloading: resume immediately
					self.task_dist[section_one]['status']=2
					self.start_process(section)
					return
			if status==2:
				task_done += 1

		# A slot is free: start the first queued section.
		if task_done<self.task_num:
			for section_one in self.task_dist:
				status = self.task_dist[section_one].get('status')
				if status == 1:
					self.task_dist[section_one]['status']=2
					self.start_process(section_one)
					break

	def start_process(self,section):
		"""Spawn a worker process that downloads the queued chunks of *section*.
		Does nothing when the section has no chunk list."""
		logger.info('{} 启动进程'.format(section))
		chunks = self.all_dt[section]
		if not len(chunks):	# no queued work: leave the status untouched
			return
		progress = self.dt_progress[section]
		if 'pre_done_length' not in progress.keys():
			progress['pre_done_length'] = 0
		progress['status'] = 7 	# downloading
		progress['msg'] = ''
		target_dir = os.path.join(progress['root'],progress['cacle_path'])
		control_queue = self.task_dist[section]['queue']
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		worker = Process(target=new_download,
				kwargs={'section': section,'name':progress['name'],'proc_queue2':control_queue, 'proc_queue1': self.proc_queue,'path':target_dir,'dt':chunks})
		worker.start()

	def dropTasks(self,section):
		"""Mark *section* as stopped in the scheduler and as paused in the UI state."""
		progress = self.dt_progress[section]
		self.task_dist[section]['status'] = 0
		progress['status'] = 9
		progress['msg'] = '已暂停'

	def checkTasks(self,section):
		"""Return the scheduler status of *section* (0 idle, 1 queued, 2 downloading)."""
		entry = self.task_dist[section]
		return entry['status']

	def getNewMovie(self):
		"""Scan the cache directory for files/folders not yet registered in the
		movie table.

		Returns:
			list of ('file'|'dir', name) tuples, directories first.
		"""
		moviedirs = []
		moviefiles = []
		files = []
		dirs = []
		movies = self.db.query('select * from movie')
		if movies:
			for m in movies:
				if m.ftype == 'm3u8':
					# (was wrapped in a try/except whose two branches were
					# byte-identical -- dead code, removed)
					moviedirs.append(m.path)
				else:
					moviefiles.append(m.name)

		filelist = os.listdir(self.cacle_dir)
		for f in filelist:
			if os.path.isfile(self.cacle_dir+'/'+f):
				if not f in moviefiles:
					files.append(('file',f))
			elif f != 'Images':	# 'Images' is an internal asset folder, skip it
				if not f in moviedirs:
					dirs.append(('dir',f))
		movie_list = dirs+files
		return movie_list

	def addNewMovie(self,name,author,ftype):
		"""Manually import a file or m3u8 directory from the video folder into
		the movie table.  For directories a playable index (temp_index.m3u8) is
		generated with every URI rewritten onto the local /hls/ route.

		Returns:
			(status, message) tuple.
		"""
		path = ''
		msg = ''
		status = False
		filepath = os.path.join(appconfig['video_path'],name)
		section = str(round(time.time() * 1000))	# millisecond timestamp as id
		try:
			if ftype == 'file' and os.path.isfile(filepath):
				ftype = name.split('.')[-1]
				if not ftype:
					ftype = 'mp4'	# no extension: assume mp4
					path = name
				else:
					path = ''.join(name.split('.')[:-1])
				index_name = name
				msg = '文件解析成功'
				status = True
			elif ftype == 'dir' and os.path.isdir(filepath):
				ftype = 'm3u8'
				index_name = 'temp_index.m3u8'
				path = name
				msg = "未找到索引文件"
				# Use the first valid .m3u8 in the directory as the source index.
				for file in  os.listdir(filepath):
					if file.endswith('.m3u8') and file != 'temp_index.m3u8':
						with open(filepath+'/'+file,'r',encoding='utf-8') as index:
							lines = index.read()
							if not lines.startswith('#EXTM3U'):
								msg = '索引文件解析失败'
								status = False
								logger.info('This index file is not startswith(“#EXTM3U”)')
								continue
							logger.info('Find index file：%s'%filepath)
							# Rewrite the encryption-key URI (if any) onto /hls/.
							try:
								key = re.compile('URI="(.*?)"')
								key_name = key.findall(lines)[0].split('/')[-1]
								rlt,num = re.subn('URI="(.*?)"','URI="/hls/%s/%s/%s"'%(section,name,key_name),lines)
								lines = rlt
							except:
								pass

							# Rewrite every segment line (lines not starting with
							# '#') onto the local /hls/<section>/<name>/ route.
							class Convert():
								s = lines
								def convert(self,value):
									matched = value.group().replace('\n','')
									if matched and matched != '\n':
										line = '\n/hls/%s/%s/%s'%(section,name,matched.split('/')[-1])
									else:
										line = '\n'
									return line
							c = Convert()
							rlt = re.sub('\n(?!#)(.*).*?',c.convert,Convert.s)
							with open(filepath+'/'+index_name,'w',encoding='utf-8') as file:
								file.write(rlt)
						msg = '索引文件解析成功'
						status = True
						break
		except Exception as ex:
			logger.exception(ex)
			msg = '创建新索引失败'
			status = False
		# Persist to the movie table.
		if status:
			try:
				addtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
				sqlstr = 'insert into movie (id,name,author,logo,cuttype,ftype,indexname,path,root_path,area,addtime) \
					values ("%s","%s","%s","%s",1,"%s","%s","%s","%s",0,"%s");' % (section,name,author,'',ftype,index_name,path,self.cacle_dir,addtime)
				self.db.execute(sqlstr)
				msg = '数据成功写入'
			except:
				msg = "写入数据库失败"
				status = False
		return status,msg

	def producer_txt(self,section):
		"""Prepare a download whose source is a local txt file of URLs (one per
		line): read the file, queue every line via resolveData and persist the
		counters.

		Returns:
			True when the chunk list was queued, False otherwise.
		"""
		self.dt_progress[section]['status'] = 1 	# start parsing the source
		self.dt_progress[section]['msg'] = '解析链接'
		url = self.dt_progress[section]['url']	# for txt sources this is a local path
		self.dt_progress[section]['merge'] = 0	# txt downloads are never merged
		cacle_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		try:
			if os.path.exists(url) and os.path.getsize(url)>0:
				logger.info("txt file exist")
				with open(url,'r') as fd:
					data_ts = fd.readlines()
			else:
				data_ts = None
			if data_ts:
				if not os.path.exists(cacle_path):	# create the cache directory
					os.makedirs(cacle_path,exist_ok=True)
				# Queue the chunks.
				_,total,done,done_length = self.resolveData(section,data_ts,ftype='txt')
				# Persist counters.  The row (or the progress entry) may already
				# have been deleted, in which case we bail out below.
				sqlstr = 'update section set total={total},done={done},url="{url}",done_length={done_length} where id="{id}"'.format(total=total,done=done,url=url,done_length=done_length,id=section)
				try:
					self.db.execute(sqlstr)
					self.dt_progress[section]['done'] = done
					self.dt_progress[section]['done_length'] = done_length
					self.dt_progress[section]['status'] = 6
					self.dt_progress[section]['msg'] = '等待中......'
					self.dt_progress[section]['total'] = total
					ProStatus().triggerUpdateData(self.getStatus(id=section))
					return True
				except Exception as e:
					# Was a silent bare except -- at least record the failure.
					logger.error(e)
					return False
			return False
		except Exception as e:
			logger.error(e)
			return False

	def producer_jpg(self,section):
		"""Prepare an image download.  The index name may contain a numeric
		range pattern like ``pic[001-099].jpg``; expand it into one task per
		number (zero-padded to the pattern's width), otherwise queue the single
		file.

		Returns:
			the task list (truthy when anything was queued).
		"""
		self.dt_progress[section]['status'] = 1 	# start building tasks
		self.dt_progress[section]['msg'] = '解析链接'
		url = self.dt_progress[section]['url']
		self.dt_progress[section]['merge'] = 0	# images are never merged
		indexname = self.dt_progress[section]['indexname']
		cacle_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
		ftype = self.dt_progress[section]['type']
		parsed_result = urlsplit(url)
		scheme = parsed_result.scheme
		netloc = parsed_result.netloc
		netpath = os.path.split(parsed_result.path)[0]
		base_url = scheme+'://'+netloc+netpath	# identical for every expanded file
		dt = []		# download tasks
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		try:
			# Raw string: '\[' and '\d' are regex escapes (the old non-raw
			# literal produced invalid-escape DeprecationWarnings).
			link = re.compile(r'(\[(\d+)-(\d+)\])')
			links = link.findall(indexname)[0]	# IndexError when no range pattern
			fmt = '%0{}d'.format(len(links[1]))	# zero-pad to the pattern's width
			for i in range(int(links[1]),int(links[2])+1):
				file = indexname.replace(links[0],fmt % i)
				dt.append({'section':section,'ftype':ftype,'file_name':file,'cacle_path':cacle_path,'base_url':base_url,'Amount':0,'status':0})
		except Exception:
			# No (or malformed) range pattern: download the single file as-is.
			dt.append({'section':section,'ftype':ftype,'file_name':indexname,'cacle_path':cacle_path,'base_url':base_url,'Amount':0,'status':0})
		total = len(dt)
		done = 0
		done_length = 0
		if not os.path.exists(cacle_path):	# create the cache directory
			os.makedirs(cacle_path,exist_ok=True)

		# Persist counters.
		sqlstr = 'update section set ftype="{ftype}",total={total},done={done},url="{url}",done_length={done_length} where id="{id}"'.format(ftype=ftype,total=total,done=done,url=url,done_length=done_length,id=section)
		self.db.execute(sqlstr)
		self.dt_progress[section]['status'] = 6
		self.dt_progress[section]['msg'] = '等待中......'
		self.dt_progress[section]['total'] = total
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		return dt

	def producer_mp4(self,section):
		"""Prepare a plain-file (mp4) download: probe the server for resume
		support, generate a local m3u8-style index that slices the file into
		byte-range chunks, then queue the chunks via resolveData.

		Returns:
			True when the chunk list was queued, False when the server does
			not support ranged requests.
		"""
		self.dt_progress[section]['status'] = 4 	# resolving the link
		self.dt_progress[section]['msg'] = '解析链接'
		url = self.dt_progress[section]['url']
		self.dt_progress[section]['merge'] = 1 		# chunks must be merged afterwards
		indexname = self.dt_progress[section]['indexname']
		indexname = os.path.splitext(indexname)[0]+'.m3u8'
		ftype = self.dt_progress[section]['type']
		cacle_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])

		requests.packages.urllib3.disable_warnings()
		r = self.rs.get(url,stream=True,verify=False)
		url = r.url	# keep the final URL after redirects
		# Probe resume support.  BUG FIX: pre-initialize both values so a
		# failure while reading the headers (e.g. a non-numeric Content-Length
		# raising in int()) cannot leave Content_Length unbound below.
		Content_Length = 0
		Accept_Ranges = ''
		try:
			Content_Length = int(r.headers['Content-Length'] if 'Content-Length' in r.headers else 0)
			Accept_Ranges = r.headers['Accept-Ranges']
		except Exception as e:
			Accept_Ranges = ''

		content_length = Content_Length

		self.dt_progress[section]['content_length'] = Content_Length

		if not Accept_Ranges:
			# No Accept-Ranges header: probe with an explicit Range request.
			try:
				headers = {'Range': 'bytes=0-10240'} 
				r = self.rs.get(url,headers=headers,stream=True,verify=False)
			except Exception:		# still failing: ranged download impossible
				self.dt_progress[section]['status'] = 15
				self.dt_progress[section]['msg'] = '不支持断点续传'
				return False

		# Reuse the index file when it was already generated.
		if os.path.exists(os.path.join(cacle_path,indexname)) and os.path.getsize(os.path.join(cacle_path,indexname))>5*1024:
			with open(os.path.join(cacle_path,indexname),'r') as fd:
				data_ts = fd.readlines()
		# Otherwise generate the index file.
		else:
			# Chunk size scales with the total size.
			if Content_Length <= 1024*1024*200:	# under 200 MB
				per_size = 1024*512
			elif Content_Length<=1024*1024*1024:	# under 1 GB
				per_size = 1024*1024
			else:
				per_size = 1024*1024*5

			if not os.path.exists(cacle_path):	# create the cache directory
				os.makedirs(cacle_path,exist_ok=True)

			with open(os.path.join(cacle_path,indexname),'w') as fd:
				fd.write('#EXTM3U\n')
				fd.write('#EXT-X-VERSION:3\n')
				fd.write('#EXT-X-TARGETDURATION:8\n')
				fd.write('#EXT-X-PLAYLIST-TYPE:VOD\n')
				fd.write('#EXT-X-MEDIA-SEQUENCE:0\n')
				fd.write('#EXT-CONTENT-LENGTH:%s\n'%Content_Length)
				fd.write('#EXT-ACCEPT-RANGES:%s\n'%Accept_Ranges)
				fd.write('#EXT-PER-SIZE:%s\n'%per_size)
				fd.write('#EXT-TOTAL-PART:\n')

				# Emit one pseudo-segment per byte-range chunk; the chunk name
				# encodes <start>_<end>_<size> (see resolveData's mp4 branch).
				start_byte = 0 		# current chunk's first byte / its name
				data_ts = []				# queued chunk names
				while start_byte<Content_Length:
					tmp_start = start_byte	# first byte of the chunk
					tmp_begin = tmp_start	# byte to resume downloading from
					next_byte = start_byte+per_size

					if round((Content_Length-next_byte+1)/per_size):
						tmp_end = start_byte + per_size -1
						tmp_size = per_size
					else:
						# Final chunk: open-ended range, whatever bytes remain.
						tmp_end = ''
						tmp_size = Content_Length - start_byte
						next_byte = Content_Length
					ts_name = str(tmp_start)+'_'+str(tmp_end)+'_'+str(tmp_size)+'.ts'
					fd.write('#EXTINF:0,\n')
					fd.write(ts_name+'\n')
					data_ts.append(ts_name)
					start_byte = next_byte
				fd.write('#EXT-X-ENDLIST')
		# Record state.
		self.dt_progress[section]['status'] = 4 	# parsing the index file
		self.dt_progress[section]['msg'] = '解析索引文件'
		# Queue the chunks.
		_,total,done,done_length = self.resolveData(section,data_ts)
		# Persist counters.
		sqlstr = 'update section set ftype="{ftype}",total={total},done={done},url="{url}",content_length={content_length},done_length={done_length} where id="{id}"'.format(ftype=ftype,total=total,done=done,url=url,content_length=content_length,done_length=done_length,id=section)
		self.db.execute(sqlstr)
		self.dt_progress[section]['done'] = done
		self.dt_progress[section]['done_length'] = done_length
		self.dt_progress[section]['status'] = 6
		self.dt_progress[section]['msg'] = '等待中......'
		self.dt_progress[section]['total'] = total
		return True

	def producer_m3u(self,section):
		"""Resolve an m3u8 task: obtain its index file (from inline data, a
		cached copy on disk, or the network), validate it, queue its ts
		fragments and persist the resulting counters to the section table.

		:param section: task id (key into self.dt_progress)
		:returns: True on success, False when fetching/parsing/persisting failed
		"""
		self.dt_progress[section]['status'] = 4 	# resolving the link
		self.dt_progress[section]['msg'] = '解析链接'
		url = self.dt_progress[section]['url']
		self.dt_progress[section]['merge'] = 0
		indexname = self.dt_progress[section]['indexname']
		ftype = self.dt_progress[section]['type']
		data = self.dt_progress[section].get('data',None)
		save_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
		# url parts used later to absolutize relative playlist references
		parsed_result = urlsplit(url)
		scheme = parsed_result.scheme
		netloc = parsed_result.netloc
		netpath = os.path.split(parsed_result.path)[0]
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		# 1) index content was handed to us directly with the task
		if data:
			logger.info("get m3u8 file data")
			data_ts = data.split('\r\n')
			if not os.path.exists(save_path):	# create the cache directory
				os.makedirs(save_path,exist_ok=True)
			with open(os.path.join(save_path,indexname),'w+') as fd:
				for d in data_ts:
					if len(d)>0:
						fd.write(d+'\n')

		# 2) a plausible cached index file (>2KB) already exists on disk
		elif os.path.exists(os.path.join(save_path,indexname)) and os.path.getsize(os.path.join(save_path,indexname))>2*1024:
			logger.info("index file exist")
			with open(os.path.join(save_path,indexname),'r') as fd:
				data_ts = fd.readlines()
		# 3) download the index file from the network
		else:
			logger.info("{} index file not exist".format(section))
			self.dt_progress[section]['msg'] = '开始下载索引文件'
			ProStatus().triggerUpdateData(self.getStatus(id=section))
			if not os.path.exists(save_path):	# create the cache directory
				os.makedirs(save_path,exist_ok=True)
			status_code = 200
			for i in range(self.max_tries):
				try:
					r = self.rs.get(url,timeout=10,verify=False)
					status_code = r.status_code
					self.dt_progress[section]['status'] = 2
					self.dt_progress[section]['msg'] = '索引文件下载完成'
					break
				except Exception:
					self.dt_progress[section]['status'] = 1
					self.dt_progress[section]['msg'] = '索引文件重试下载中...'
					ProStatus().triggerUpdateData(self.getStatus(id=section))
					continue
			else:	# every retry failed
				self.dt_progress[section]['status'] = 3
				self.dt_progress[section]['msg'] = '索引文件下载失败'
				ProStatus().triggerUpdateData(self.getStatus(id=section))
				return False

			# auth / availability check
			if status_code!=200:
				self.dt_progress[section]['status'] = 5
				self.dt_progress[section]['msg'] = 'status_code=%s'%status_code
				ProStatus().triggerUpdateData(self.getStatus(id=section))
				return False
			# sanity-check the payload before using it
			t_urls = r.text.split('\n')
			if not t_urls[0].startswith('#EXTM3U'):
				logger.info("index file is bad")
				self.dt_progress[section]['status'] = 5
				self.dt_progress[section]['msg'] = '索引文件内容异常'
				ProStatus().triggerUpdateData(self.getStatus(id=section))
				return False
			else:
				# The playlist may be a master playlist that only points at the
				# real media playlist; follow the first non-comment entry.
				for t_url in t_urls:
					# startswith() instead of t_url[0]: split('\n') yields empty
					# strings for blank lines, which used to raise IndexError
					if t_url.startswith('#'):
						if t_url.startswith('#EXTINF'):
							logger.info("{} index file is ok".format(section))
							break
					elif t_url != '':
						p_rlt = urlsplit(t_url)
						# keep a copy of the playlist as downloaded
						# NOTE(review): literal file name, not the indexname variable — confirm intended
						with open(os.path.join(save_path,'indexname.m3u8'),'wb') as fd:
							fd.write(r.content)
						if os.path.split(p_rlt.path)[-1] == 'index.m3u8':	# re-download the real index
							logger.info("update index file")
							self.dt_progress[section]['status'] = 1
							self.dt_progress[section]['msg'] = '索引文件更新中...'
							ProStatus().triggerUpdateData(self.getStatus(id=section))
							if not p_rlt.scheme:	# not an absolute url: resolve against the original
								if not t_url.startswith('/'):
									t_url = '/'+ t_url
								url = scheme+'://'+netloc+netpath+t_url
								self.dt_progress[section]['url'] = url
							logger.info("update url: %s"%url)
							for _ in range(self.max_tries):
								try:
									# verify=False to match the first request above
									r = self.rs.get(url,timeout=10,verify=False)
									self.dt_progress[section]['status'] = 2
									self.dt_progress[section]['msg'] = '索引文件更新完成'
									break
								except Exception:
									continue
							else:
								self.dt_progress[section]['status'] = 3
								self.dt_progress[section]['msg'] = '索引文件更新失败'
								ProStatus().triggerUpdateData(self.getStatus(id=section))
								return False
						elif t_url.startswith('/'):		# server-relative reference without the index.m3u8 name
							logger.info("update index file")
							self.dt_progress[section]['status'] = 1
							self.dt_progress[section]['msg'] = '索引文件更新中...'
							ProStatus().triggerUpdateData(self.getStatus(id=section))
							url = scheme+'://'+netloc+t_url
							self.dt_progress[section]['url'] = url
							logger.info("update url: %s"%url)
							for _ in range(self.max_tries):
								try:
									r = self.rs.get(url,timeout=10,verify=False)
									self.dt_progress[section]['status'] = 2
									self.dt_progress[section]['msg'] = '索引文件更新完成'
									break
								except Exception:
									continue
							else:
								self.dt_progress[section]['status'] = 3
								self.dt_progress[section]['msg'] = '索引文件更新失败'
								ProStatus().triggerUpdateData(self.getStatus(id=section))
								return False
						break	# only the first media entry matters

			# index file is good now: cache it on disk
			with open(os.path.join(save_path,indexname),'wb') as fd:
				fd.write(r.content)

			data_ts = r.text.split('\n')
		# record state
		self.dt_progress[section]['status'] = 4 	# parsing the index file
		self.dt_progress[section]['msg'] = '解析索引文件'
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		# final validation of whichever source produced data_ts
		if not '#EXTM3U' in data_ts[0]:
			self.dt_progress[section]['status'] = 5 	# parse failed
			self.dt_progress[section]['msg'] = '解析索引文件失败'
			ProStatus().triggerUpdateData(self.getStatus(id=section))
			return False
		if ftype=='':
			self.dt_progress[section]['type'] = 'm3u8'
			ftype = self.dt_progress[section]['type']
		# queue the fragments
		# NOTE(review): ftype=None is passed here but omitted elsewhere — confirm resolveData semantics
		_,total,done,done_length = self.resolveData(section,data_ts,ftype=None)

		# persist counters; the row may already have been deleted meanwhile
		sqlstr = 'update section set ftype="{ftype}",total={total},done={done},url="{url}",done_length={done_length} where id="{id}"'.format(ftype=ftype,total=total,done=done,url=url,done_length=done_length,id=section)
		try:
			self.db.execute(sqlstr)
			self.dt_progress[section]['done'] = done
			self.dt_progress[section]['done_length'] = done_length
			self.dt_progress[section]['status'] = 6
			self.dt_progress[section]['msg'] = '等待中......'
			self.dt_progress[section]['total'] = total
			ProStatus().triggerUpdateData(self.getStatus(id=section))
			return True
		except Exception:
			return False

	async def distribute_task(self,section):
		'''Fan the task out to worker coroutines and wait for them all to
		finish, then persist the final counters and either finalize (merge)
		or pause the task.'''
		logger.info('start progress-id for {}'.format(section))
		dt = self.all_dt[section]
		if len(dt):	# only spawn workers when there are queued fragments
			if not 'tsIndex' in self.dt_progress[section].keys():
				self.dt_progress[section]['tsIndex'] = 0
			if not 'pre_done_length' in self.dt_progress[section].keys():
				self.dt_progress[section]['pre_done_length'] = 0
			self.dt_progress[section]['status'] = 7 	# downloading
			self.dt_progress[section]['msg'] = ''
			ProStatus().triggerUpdateData(self.getStatus(id=section))
			sub_workers = [asyncio.ensure_future(self.download(section)) for _ in range(self.max_tasks)]
			# yield control to the loop and wait until every worker is done
			task_done,task_pending = await asyncio.wait(sub_workers)
			# for i in task_done:
			# 	logger.info(i.result())
				# logger.info(i.name)
			# for task in asyncio.Task.all_tasks():
			# 	logger.info(task.result())
		else:
			logger.info('download list is None')
			# return
		# deletion was requested while the task was running
		if self.dt_progress[section]['status'] == 13:	# 13 = about to delete
			self.deleteData(section,1)	# deleting mid-download implies deleting the cached files too
			return
		# task finished (or paused): publish the result
		logger.info('Task End')
		done = self.dt_progress[section]['done']
		total = self.dt_progress[section]['total']
		content_length = self.dt_progress[section]['content_length']
		done_length = self.dt_progress[section]['done_length']
		indexname = self.dt_progress[section]['indexname']
		# q.put('\ntask-end: {} | ({}/{}) {:.2%}'.format(section,done,total,done/total))

		# merge fragments if requested, then write the outcome to the db row
		section_one = self.db.get("select * from section where id='%s'"%section)
		# section_one = DownloadSession.query.filter_by(id=section).first()
		if done == total:	# every fragment downloaded
			self.dt_progress[section]['content_length'] = done_length
			section_one.success = 1
			section_one.done = done
			section_one.content_length = done_length
			section_one.done_length = done_length
			self.dt_progress[section]['status'] = 10
			self.dt_progress[section]['msg'] = '下载完成'
			if self.dt_progress[section]['merge'] in (1,3):	# merge requested for this task
				self.dt_progress[section]['status'] = 11
				self.dt_progress[section]['msg'] = '合并文件...'
				name = await self.convert_m3u(section)
				section_one.success = 2
				self.dt_progress[section]['status'] = 12
				self.dt_progress[section]['msg'] = '合并完成'
				self.dt_progress[section]['indexname'] = name
				indexname = name
		else:	# stopped early: record progress and keep the remaining queue
			section_one.done = done
			self.dt_progress[section]['status'] = 9
			self.dt_progress[section]['msg'] = '已暂停'
			self.all_dt[section] = dt

		sqlstr = 'update section set success={success},indexname="{indexname}",done={done},content_length={content_length},done_length={done_length} \
		where id="{id}"'.format(success=section_one.success,indexname=indexname,done=section_one.done,content_length=section_one.content_length,done_length=section_one.done_length,id=section)
		self.db.execute(sqlstr)

		ProStatus().triggerUpdateData(self.getStatus(id=section))
		# drop the finished task from the running set and start the next one
		# self.dt_progress.pop(section)
		self.dropTasks(section)
		self.getTasks()

	async def download(self,section):
		'''Worker coroutine for one task.

		Loops while the shared cursor (dt_progress[section]['tsIndex']) has
		not passed the fragment count: claims the fragment at the cursor,
		advances the cursor (wrapping back to pending/failed fragments at the
		end), then downloads the fragment according to its ftype
		('m3u8'/'mp4'/'http'/other).  Several of these run concurrently per
		task (see distribute_task).  Returns the success flag of the last
		attempted fragment, or None when nothing was attempted.'''
		dt_ok = None
		while self.dt_progress[section]['tsIndex']<self.dt_progress[section]['total']:
			if self.dt_progress[section]['status'] in (8,9,13):		# pausing/paused/deleting: stop claiming work
				break
			dt_ok = False 	# success flag for the current fragment
			# key = dt.pop(0)
			tsIndex = self.dt_progress[section]['tsIndex']
			# advance the shared cursor before working on this fragment
			if tsIndex<self.dt_progress[section]['total']-1:
				self.dt_progress[section]['tsIndex']  = tsIndex + 1
			else:
				# reached the end: rewind to the first pending/failed fragment
				for i in range(len(self.all_dt[section])):
					if self.all_dt[section][i]['status'] in (0,3):
						self.dt_progress[section]['tsIndex'] = i
						break
				else:
					self.dt_progress[section]['tsIndex'] = self.dt_progress[section]['total']
			# handle the fragment we claimed
			key = self.all_dt[section][tsIndex]
			if not key['status'] in (0,3): 	# 0 pending, 1 downloading, 2 done, 3 failed
				continue

			# if key['Amount'] >= 3:	# already failed too many times, give up on it
			# 	continue

			# start downloading this fragment
			# logger.info(tsIndex)
			self.all_dt[section][tsIndex]['status'] = 1 	# mark the fragment as downloading


			# logger.info(key)
			if key['ftype'] == 'm3u8':
				file_name = key['file_name']
				file_path = os.path.join(key['cacle_path'],file_name)
				url = key['base_url']
				if key['query']:
					url = url + '?'+key['query']
				# logger.info(url)
				# logger.info('starting %s'%file_name)
				for i in range(self.max_tries):
					try:
						async with self.session.get(url,headers=self.headers,timeout=5) as resp:
							async with aiofiles.open(file_path,'wb') as fd:
								data = await resp.read()
								if file_name.endswith('key'):
									await fd.write(data)
								else:
									if len(data)>2048:	# key files are tiny; other payloads this small are treated as bad
										await fd.write(data)
									else:
										break
						self.dt_progress[key['section']]['done_length'] += os.path.getsize(file_path)
						dt_ok = True
						break
					except Exception as e:
						if i==0:
							err = 'download failed %s times for url: %s  Error:%s'%(i+1,url,e)
						else:
							err = 'download failed %s times for file: %s'%(i+1,file_name)
						logger.info(err)
						pass
					if self.dt_progress[section]['status'] in (8,9,13):		# download cancelled
						self.all_dt[section][tsIndex]['status'] = 0 	# restore the fragment to pending
						break
			elif key['ftype'] == 'mp4':
				file_name = key['file_name']
				file_path = os.path.join(key['cacle_path'],file_name)
				url = key['base_url']
				# logger.info(url)
				for i in range(self.max_tries):
					try:
						headers = {'Range': 'bytes=%s-%s'%(str(key['begin']),key['end'])}
						async with self.session.get(url,headers=headers,timeout=5) as resp:
							async with aiofiles.open(file_path,'ab') as fd:
								while True:
									data = await resp.content.read(20480) 	# write at most 20k per chunk
									status = resp.status
									if status not in (200,206):
										# key['Amount'] +=1
										break
									if not data:
										break
									await fd.write(data)
									self.dt_progress[key['section']]['done_length'] += len(data)
					except asyncio.CancelledError:
						# raise
						break
					except Exception as e:
						# The range request died midway; measure how much of the
						# range actually arrived and move 'begin' forward so the
						# next try resumes from there.
						# logger.info('块下载失败')
						if os.path.exists(file_path):
							tmp_size = os.path.getsize(file_path)
							if tmp_size > key['begin'] - key['start']:		# some more bytes arrived before the failure
								key['begin'] = key['start'] + tmp_size	# resume offset
								self.all_dt[section][tsIndex]['begin'] = key['begin']
							else:
								pass
								# key['Amount'] +=1
					if os.path.exists(file_path) and os.path.getsize(file_path)==int(key['size']):
						dt_ok = True
						break
					if os.path.exists(file_path) and os.path.getsize(file_path)>int(key['size']):
						# overshot the expected size: discard and restart the fragment
						os.remove(file_path)
						self.all_dt[section][tsIndex]['status'] = 0
						self.all_dt[section][tsIndex]['begin'] = key['start']
					if self.dt_progress[section]['status'] in (8,9,13):		# download cancelled
						self.all_dt[section][tsIndex]['status'] = 0 	# restore the fragment to pending
						break
			elif key['ftype'] == 'http':
				file_name = key['file_name']
				file_path = os.path.join(key['cacle_path'],file_name)
				url = key['url']
				while key['Amount'] < self.max_tries:
					try:
						headers = {'Range': 'bytes=%s-%s'%(key['begin'],key['end'])}
						async with self.session.get(url,headers=headers,timeout=5) as resp:
							async with aiofiles.open(file_path,'ab') as fd:
								while True:
									data = await resp.content.read(20480) 	# write at most 20k per chunk
									status = resp.status
									if status not in (200,206):
										key['Amount'] +=1
										break
									if not data:
										break
									await fd.write(data)
									self.dt_progress[key['section']]['done_length'] += len(data)	# grow the downloaded-bytes counter
					except asyncio.CancelledError:
						# raise
						break
					except Exception as e:
						# Range request died midway: advance 'begin' past the
						# bytes that did arrive so the next try resumes there.
						# logger.info('块下载失败')
						if os.path.exists(file_path):
							tmp_size = os.path.getsize(file_path)
							if tmp_size > key['begin'] - key['start']:		# some more bytes arrived before the failure
								key['begin'] = key['start'] + tmp_size	# resume offset
							else:
								key['Amount'] +=1

					if os.path.exists(file_path) and os.path.getsize(file_path)>=key['size']:
						dt_ok = True
						break
					if self.dt_progress[section]['status'] in (8,9,13):		# download cancelled
						self.all_dt[section][tsIndex]['status'] = 0 	# restore the fragment to pending
						break
				if not dt_ok:
					key['Amount'] = 0	# reset the failure counter
			else:
				file_name = key['file_name']
				file_path = os.path.join(key['cacle_path'],file_name)
				url = key['base_url'] + '/' + file_name
				# logger.info(url)
				for i in range(self.max_tries):
					try:
						async with self.session.get(url,timeout=5) as resp:
							async with aiofiles.open(file_path,'wb') as fd:
								await fd.write(await resp.read())
						self.dt_progress[key['section']]['done_length'] += os.path.getsize(file_path)
						dt_ok = True
						break
					except:
						pass
					if self.dt_progress[section]['status'] in (8,9,13):		# download cancelled
						self.all_dt[section][tsIndex]['status'] = 0 	# restore the fragment to pending
						break
			if dt_ok:	# fragment finished successfully
				# logger.info('success :%s'%file_name)
				self.all_dt[section][tsIndex]['status'] = 2 	# mark the fragment done
				self.dt_progress[key['section']]['done'] += 1	# one more finished fragment for this task
			else:
				self.all_dt[section][tsIndex]['status'] = 3 	# mark the fragment failed
				self.dt_progress[key['section']]['fail'].add(file_name)	# remember the failed file name
				key['Amount'] += 1

		return  dt_ok

	def redownload(self,section):
		"""重新下载"""
		if self.dt_progress[section]['status']==7:
			self.send_subprocess(section)	# 先发送暂停信号
			self.dt_progress[section]['msg'] = '等待暂停'
			return False
		self.dt_progress[section]['status'] = 0
		self.dt_progress[section]['msg'] = ''
		section_one = self.db.get('select * from section where id="%s"'%section)
		section_one.success = 0
		status = self.getLocalVideo(section_one)
		if status==0:
			status = self.producer_m3u(section)
		if status!=False:
			self.getTasks(section)
		ProStatus().triggerUpdateData(self.getStatus(id=section))
		return True

	def stopTask(self,section):
		"""开始/暂停"""
		try:
			st = self.dt_progress[section]['status']
			if st in(0,3,5):	#无index 下载失败 解析失败时
				ftype = self.dt_progress[section]['type']
				if ftype == 'm3u8':
					status = self.producer_m3u(section)
				elif ftype == 'mp4':
					status = self.producer_mp4(section)
				elif ftype in ('jpg','png','bmp'):
					status = self.producer_jpg(section)
				else:
					logger.info('Unsupported file type:%s'%ftype)
					status = 0
				if status:
					self.getTasks(section)
			elif st == 6: 	# 等待中時
				if self.checkTasks(section) == 1:
					self.dropTasks(section)
			elif st == 7: 	# 下载中時
				self.dt_progress[section]['status'] = 8
				self.dt_progress[section]['msg'] = '暂停中...'
				self.send_subprocess(section)
				self.getTasks()
			elif st == 8:	# 暂停中时
				pass
			elif st == 9:	# 已暂停时
				# logger.info(self.all_dt[section])
				if len(self.all_dt[section]):
					for i in range(len(self.all_dt[section])):
						if self.all_dt[section][i]['status'] == 3:
							self.all_dt[section][i]['Amount'] = 0
				else:
					self.getLocalVideo(section)
				self.getTasks(section)
			else:
				pass

			ProStatus().triggerUpdateData(self.getStatus(id=section))
			return True
		except:
			return False

	def deleteTask(self,section,dtype):
		try:
			st = self.dt_progress[section]['status']	# 判断是否存在
			if st == 6: 			# 等待中時
				self.dropTasks(section)
				msg = self.deleteData(section,dtype)
				ProStatus().triggerDeleteData(section)
			if st == 7:				# 正在下载
				self.dt_progress[section]['status'] = 13
				self.dt_progress[section]['msg'] = '准备删除'
				self.send_subprocess(section)
				msg = '操作成功'

			elif st in (0,3,5,9,10,12,14):		# 无索引文件、索引下载失败、解析失败、已暂停、下载完成、合并完成、已删除
				msg = self.deleteData(section,dtype)
				ProStatus().triggerDeleteData(section)
			else:
				msg = '状态不对，当前状态为%s'%st
		except:
			msg = '获取状态失败'
		return msg

	def deletemovie(self,id,movie=None):
		"""删除电影"""
		msg = ''
		if movie==None:
			movie = self.db.get('select * from movie where id=%s'%id)
		if movie.ftype == 'm3u8':
			file_path = os.path.join(movie.root_path,movie.path) if movie.path else None
			logo_path = os.path.join(movie.root_path,'Images/'+movie.logo) if movie.logo else None
		else:
			if movie.path=="Cutfolder":
				path = os.path.join(movie.path,movie.name)
				file_path = os.path.join(movie.root_path,path)
			else:
				file_path = os.path.join(movie.root_path,movie.name) if movie.name else None
			logo_path = os.path.join(movie.root_path,'Images/'+movie.logo) if movie.logo else None
		try:
			if logo_path and os.path.isfile(logo_path):
				os.remove(logo_path)
			if file_path and os.path.isdir(file_path):
				shutil.rmtree(file_path,True)
			if file_path and os.path.isfile(file_path):
				os.remove(file_path)
			self.db.execute('delete from movie where id="%s"'%id)
		except:
			msg = "%s"%id
		return msg

	def deleteData(self,section,dtype=0):
		try:
			section_one = self.db.get('select * from section where id="%s"'%section)
		except:
			return '数据库中找不到该session'
		cacle_path = os.path.join(self.cacle_dir,section_one.cacle_path)
		# 删除缓存文件
		msg = '操作成功'
		if dtype:
			try:
				for root,dirs,files in os.walk(cacle_path):
					for file in files:
						os.remove(os.path.join(root,file))
					for d in dirs:
						os.rmdir(os.path.join(root,d))
				os.rmdir(cacle_path)
			except Exception as e:
				msg = repr(e)
		else:
			pass
		self.db.execute('delete from section where id="%s"'%section)
		self.dt_progress.pop(section)
		self.all_dt.pop(section)
		self.task_dist.pop(section)
		return msg

	def appendMovie(self,section,area=0):
		"""导入电影"""
		name = self.dt_progress[section]['name']
		author = self.dt_progress[section]['author']
		ftype = self.dt_progress[section]['type']
		indexname = self.dt_progress[section]['indexname']
		path = self.dt_progress[section]['cacle_path']
		root_path = self.dt_progress[section]['root']
		addtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
		sqlstr = 'insert into movie (id,name,author,logo,cuttype,ftype,indexname,path,root_path,area,addtime) \
			values ("%s","%s","%s","%s",1,"%s","%s","%s","%s","%d","%s");' % (section,name,author,'',ftype,indexname,path,root_path,area,addtime)
		self.db.execute(sqlstr)
		self.dt_progress.pop(section)
		self.db.execute('delete from section where id="%s"'%section)

	def getMovieForm(self,movie=None,**kwargs):
		"""
		:param movie db数据
		"""
		if movie:
			return movie
		movie = {
			'section': kwargs.get('section',''),
			'name': kwargs.get('name',''),
			'author': kwargs.get('author',''),
			'url': kwargs.get('url',''),		# 播放链接
			'urls': kwargs.get('urls',[]),		# 播放链接序列
			'indexname': kwargs.get('indexname',''),	# 暂未用到
			'logo': kwargs.get('logo',''),
			'ftype': kwargs.get('ftype','m3u8'),
			'cuttype': kwargs.get('cuttype',1),	# 1原视频 2剪切片段 3合并的片段
			'area': kwargs.get('area',1)		# 1jp 2cn
		}
		return movie

	def getplayurl(self,section,status=None,tag=0):
		"""获取播放url
		:tag 0用于边下边播，1用于已导入完成"""
		try:
			status = self.dt_progress[section]['status']
		except:
			pass
		if status in (None,10,12):
			movie = self.checkIndexFile(section,tag=1)
		else:
			movie = self.checkIndexFile(section,tag=tag)
			self.dt_progress[section]['play_index'] = movie['url']
		return movie

	def checkIndexFile(self,section,tag=0):
		"""生成边下边播文件"""
		# tag: 0-用于自定义播放，1-用户正常播放
		# 从section文件取，而不是从indexname文件取
		# Return:
		# 	name: 
		# 	url: 播放链接
		# 	urls:	播放链接序列
		# 	ftype:	视频类型
		# 	indexname:	
		# 	cuttype: 1原视频 2剪切片段 3合并的片段
		# 	area :	1jp 2cn
		urllist = []
		if tag==0:
			index_name = 'play_index.m3u8'
			indexname = self.dt_progress[section]['indexname']
			name = self.dt_progress[section]['name']
			path = self.dt_progress[section]['cacle_path']
			cacle_path = os.path.join(self.dt_progress[section]['root'],self.dt_progress[section]['cacle_path'])
			ftype = self.dt_progress[section]['type']
			author = self.dt_progress[section]['author']
			cuttype = 1
			area = 1
		elif tag==1:
			index_name = "temp_index.m3u8"
			movie = self.db.get('select * from movie where id="%s"'%section)
			if movie:
				path = movie.path
				cacle_path = os.path.join(movie.root_path,movie.path)
				name = movie.name
				ftype = movie.ftype
				indexname = movie.indexname
				author = movie.author
				cuttype = movie.cuttype
				area = movie.area
				if not indexname:
					indexname = ""
					for root,dirs,files in os.walk(cacle_path):
						for file in files:
							if file.endswith('.m3u8'):
								indexname = file
								break
				if not indexname:
					logger.info("Dont find index file!")
			else:	# 尚未导入时
				indexname = self.dt_progress[section]['indexname']
				name = self.dt_progress[section]['name']
				path = self.dt_progress[section]['cacle_path']
				cacle_path = os.path.join(self.dt_progress[section]['root'], self.dt_progress[section]['cacle_path'])
				ftype = self.dt_progress[section]['type']
				author = self.dt_progress[section]['author']
				cuttype = 1
				area = 1
		data = self.getMovieForm(section=section,name=name,author=author,indexname=indexname,ftype=ftype,cuttype=cuttype,area=area)
		if ftype in('mp4','mkv'):
			data['urls'] = ['/hls/%s/%s'%(section,indexname)]
			data['url'] = '/hls/%s/%s'%(section,indexname)
			return data

		# 如果indexname是碎片
		if str(data['cuttype']) in('2','3'):
			for index_name in indexname.split(" "):
				if not index_name:
					continue
				url = '/hls/%s/%s/%s'%(section,path,index_name)
				data['urls'].append(url)
			data['url'] = data['urls'][0]
		else:
			# 检查sectionfile
			if os.path.exists(os.path.join(cacle_path, section)):
				sectionfile = section
			else:
				sectionfile = indexname
			# 如果不存在的先创建
			if not os.path.exists(os.path.join(cacle_path,index_name)):
				with open(os.path.join(cacle_path,sectionfile),'r',encoding='utf-8') as index:
					lines = index.read()
					index_num = 0
					if not lines.startswith('#EXTM3U'):
						# logger.info(lines)
						logger.info('This index file is not startswith(“#EXTM3U”)')
					logger.info('Find index file：%s'%sectionfile)
					try:
						key = re.compile('URI="(.*?)"')
						key_name = key.findall(lines)[0].split('/')[-1]
						if tag:
							rlt,num = re.subn('URI="(.*?)"','URI="/hls/%s/%s/%s"'%(section,path,key_name),lines)
						else:
							rlt,num = re.subn('URI="(.*?)"','URI="/play_d/%s/%s/hls/%s/%s"'%(section,index_num,path,key_name),lines)
						lines = rlt
						index_num += 1
					except:
						pass

					class Convert():
						s = lines
						def __init__(self,index):
							self.index = index
						def convert(self,value):
							matched = value.group().replace('\n','')
							if matched and matched != '\n':
								if tag:
									line = '\n/hls/%s/%s/%s'%(section,path,matched.split('/')[-1])
								else:
									line = '\n/play_d/%s/%s/hls/%s/%s'%(section,self.index,path,matched.split('/')[-1])
								self.index += 1
							else:
								line = '\n'
							return line
					c = Convert(index_num)
					# rlt = re.sub('\n(.*?).*?.ts',c.convert,Convert.s)
					rlt = re.sub('\n(?!#)(.*).*?',c.convert,Convert.s)
					# logger.info(cacle_path)
					with open(cacle_path+'/'+index_name,'w',encoding='utf-8') as file:
						file.write(rlt)
			data['url'] = '/hls/%s/%s/%s'%(section,path,index_name)
			data['urls'].append(data['url'])
		return data

	async def checkindex(self,section,index):
		index = int(index)
		if self.all_dt[section][index]['status'] in (0,3):	# 未下载、下载失败时
			self.dt_progress[section]['tsIndex'] = index
			self.task_dist[section]['queue'].put({'code':2, 'section':section,'tsIndex':index})	# 修改tsindex
			for i in range(120):
				await asyncio.sleep(0.5)
				if self.all_dt[section][index]['status'] == 2:
					break
		return index

	def checkrootdir(self,section):
		if section in self.play_list:
			path = self.play_list[section]
		elif section in self.dt_progress:
			path = self.dt_progress[section]['root']
		else:
			movie = self.db.get('select * from movie where id="%s"'%section)
			path = movie.root_path
			self.play_list[section] = path
		return path

	def binary_search(self,lis, num):
		"""返回{...}"""
		left = 0
		right = len(lis) - 1
		while left <= right:   #循环条件
			mid = (left + right) // 2   #获取中间位置，数字的索引（序列前提是有序的）
			if num < lis[mid]['startTime']:  #如果查询数字比中间数字小，那就去二分后的左边找，
				right = mid - 1   #来到左边后，需要将右变的边界换为mid-1
			elif num > lis[mid]['endTime']:   #如果查询数字比中间数字大，那么去二分后的右边找
				left = mid + 1	#来到右边后，需要将左边的边界换为mid+1
			else:
				if lis[mid]['status'] in(0,3):
					return mid  #如果查询数字刚好为中间值，返回该值得索引
				break
		return -1  #如果循环结束，左边大于了右边，代表没有找到

	def onseeking(self,section,curTime):
		""""""
		logger.info(curTime)
		dt = self.all_dt[section]
		index = self.binary_search(dt,curTime)
		if index>0:
			self.dt_progress[section]['tsIndex'] = index
		return index

	def getCutIndexfile(self,_section,cutlist,data_ts,root_path,path):
		"""2.生成cut的索引文件"""
		origin_path = path
		# indexname = "temp_index.m3u8"
		indexnames = ""
		origin_abspath = os.path.join(root_path,path)
		all_cutTs = []	# 保存所有剪切出来的ts文件
		key_name = None 	# 保存key文件
		cacle_path = path + "_cut"
		# 检查是否已存在记录
		section_one = self.db.get('select * from movie where path="%s"' % cacle_path)
		if section_one:
			section = section_one.id
		else:
			section = str(round(time.time() * 1000))
		# 先创建目录
		abspath = os.path.join(root_path,cacle_path)
		if not os.path.exists(abspath):	#创建目录
			os.makedirs(abspath,exist_ok=True)
		# 创建索引
		logger.info("Create all index file")
		for cut in cutlist:
			startIndex = cut['startIndex'] - 1
			#校正开始索引
			if "#EXT-X-KEY" in data_ts[startIndex]:
				startIndex -= 1
			endIndex = cut['endIndex']
			indexname = "temp_index_"+str(startIndex)+"_"+str(endIndex)+".m3u8"
			if os.path.exists(os.path.join(abspath,indexname)):
				continue
			indexnames += "%s "%indexname
			with open(os.path.join(abspath,indexname),'w',encoding='utf-8') as fd:
				for i in range(len(data_ts)):
					# 以#开头的所有行，并排除EXTINF行
					line = data_ts[i]
					if i<startIndex:
						if line.startswith("#"):
							if "EXTINF" in line:
								continue
							if "URI=" in line:
								# line = line.replace(origin_path,cacle_path)
								try:
									key = re.compile('URI="(.*?)"')
									origin_path = key.findall(line)[0]
									keys = origin_path.split('/')
									key_name = keys[-1]
									key_path = '/hls/%s/%s'%(section,cacle_path)
									after_path = '/'.join([key_path,key_name])
									line = line.replace(origin_path,after_path)
									copyfile(os.path.join(origin_abspath,key_name),os.path.join(abspath,key_name))
								except:
									logger.info("key file copy error")
								logger.info("Index Contain key file")
							fd.write("%s"%line)
							continue

					if i >= startIndex and i<=endIndex:
						if not line.startswith("#"):
							ts = line.split('/')[-1][:-1]
							all_cutTs.append(ts)
							line = '/hls/'+section+'/'+cacle_path+'/'+ts+'\n'
							copyfile(os.path.join(origin_abspath,ts),os.path.join(abspath,ts))
						fd.write("%s"%line)
						continue
					if i>endIndex:
						break
				fd.write('#EXT-X-ENDLIST')
		# 写入数据库
		if indexnames:
			indexnames = indexnames.strip()
			if section_one:
				id = section_one.id
				indexname = section_one.indexname.strip()
				indexname += " %s"%indexnames
				sqlstr = 'update movie set indexname="{indexname}" where id="{id}"'.format(indexname=indexname,id=id)
				self.db.execute(sqlstr)
			else:
				section_one = self.db.get("select * from movie where id=%d"%int(_section))
				name = origin_path
				author = section_one.author
				path = cacle_path
				logo = section_one.logo
				addtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
				mtype = 'm3u8'
				area=section_one.area
				new_logo = section+'.jpg'
				copyfile(os.path.join(appconfig['image_path'], logo), os.path.join(appconfig['image_path'], new_logo))
				sqlstr = 'insert into movie (id,name,author,logo,cuttype,ftype,indexname,path,root_path,area,addtime) \
					values ("%s","%s","%s","%s",2,"%s","%s","%s","%s",%d,"%s");' % (section,name,author,new_logo,mtype,indexnames,path,root_path,area,addtime)
				self.db.execute(sqlstr)

	def getIndex(self,curTime,indexList):
		"""返回行索引"""
		if curTime>=0 and indexList:
			left = 0
			right = len(indexList) - 1
			while left <= right:
				mid = (left + right) // 2
				if curTime < indexList[mid]['startTime']:
					right = mid - 1
				elif curTime > indexList[mid]['endTime']:
					left = mid + 1
				else:
					return indexList[mid]['index']
		logger.info(curTime)
		return indexList[-1]['index']

	def getCutTime(self,section,cuttime):
		if cuttime:
			section_one = self.db.get("select * from movie where id=%d"%int(section))
			cacle_path = os.path.join(section_one.root_path,section_one.path)
			path = section_one.path
			indexname = section_one.indexname
			with open(os.path.join(cacle_path,indexname),'r',encoding='utf-8') as fd:
				data_ts = fd.readlines()
			startTime = 0.0
			endTime = 0.0
			durTime = 0
			for i in range(len(data_ts)):
				ts = data_ts[i]
				if ts.startswith('#'):
					if "EXTINF" in ts:
						durTime = re.compile(':(.*?),')
						durTime = durTime.findall(ts)[0]
						durTime = float(durTime)
					continue
				if ts:
					startTime = endTime
					endTime += durTime
					if startTime<=cuttime<=endTime:
						break
		else:
			startTime = 0.0
		return startTime

	def cutvideo(self,section,timelist):
		"""Translate cut time ranges into ts-segment index ranges.

		Builds a per-segment timing table from the movie's m3u8 index, maps
		every (start, end) pair in *timelist* to the index-file line numbers
		whose segments contain those times (via ``getIndex``), then hands the
		result to ``getCutIndexfile`` to write the cut index files.

		:param section: movie id
		:param timelist: list of (start_seconds, end_seconds) tuples
		:return: list of {'startIndex': i, 'endIndex': j} dicts; empty when
			*timelist* is falsy
		"""
		dt = []		# per-segment timing table fed to getIndex()
		indexList = []
		if timelist:
			section_one = self.db.get("select * from movie where id=%d"%int(section))
			cacle_path = os.path.join(section_one.root_path,section_one.path)
			path = section_one.path
			root_path = section_one.root_path
			indexname = section_one.indexname
			with open(os.path.join(cacle_path,indexname),'r',encoding='utf-8') as fd:
				data_ts = fd.readlines()
			extinf = re.compile(r':(.*?),')		# compiled once, not per line
			startTime = 0.0
			endTime = 0.0
			durTime = 0.0
			for i, raw in enumerate(data_ts):
				line = raw.strip()
				if line.startswith('#'):
					if "EXTINF" in line:
						durTime = float(extinf.findall(line)[0])
					continue
				if line:	# bugfix: '\n' lines were truthy before stripping, skewing the table
					startTime = endTime
					endTime += durTime
					# keep the original line number i so callers can slice data_ts
					dt.append({'index':i,'startTime':round(startTime,6),'endTime':round(endTime,6),'durTime':round(durTime,6)})
			for start,end in timelist:
				indexList.append({'startIndex':self.getIndex(start,dt),'endIndex':self.getIndex(end,dt)})
			logger.info("Cut list：%s"%indexList)
			self.getCutIndexfile(section,indexList,data_ts,root_path,path)
		return indexList

	def cutConvert(self,sections=None,name=None,keyname=None,data=None,path=None):
		"""Merge downloaded .ts segments into playable mp4 file(s).

		Two modes:
		* *sections* given: every segment of each listed movie is appended to
		  a single ``<name>.mp4`` in ``self.cacle_dir``; returns the file name,
		  or ``None`` when no segment was found on disk.
		* *sections* omitted: *data* holds lists of segment names (one list
		  per clip) relative to *path*; each clip becomes ``<name>_<n>.mp4``
		  inside the ``Cutfolder`` sub-directory; returns a list of
		  ``(video_name, ftype, "Cutfolder")`` tuples.

		Segments are AES-CBC decrypted when a key file is available.
		"""
		logger.info("Start Merge files")
		ftype = 'mp4'
		if sections: 	# merge everything into one file
			ts_list = []
			key_file = ''
			for section in sections:
				section_one = self.db.get("select * from movie where id=%d"%int(section))
				name = section_one.name
				indexname = section_one.indexname
				cacle_path = os.path.join(self.cacle_dir,section_one.path)
				key_file = ''	# NOTE(review): only the last section's key survives — assumes one shared key
				# parse the m3u8 index: remember the key file, collect downloaded ts files
				with open(os.path.join(cacle_path,indexname),'r',encoding='utf-8') as f:
					for raw in f.readlines():
						line = raw.strip()
						if line.startswith('#'):
							if "URI=" in line:
								key_name = re.compile('URI="(.*?)"').findall(line)[0].split('/')[-1]
								key_file = os.path.join(cacle_path,key_name)
						elif line:	# bugfix: blank lines previously slipped past the line[0] checks
							ts_name = line.split('/')[-1]
							ts_file = os.path.join(cacle_path,ts_name)
							if os.path.exists(ts_file) and os.path.getsize(ts_file)>0:
								ts_list.append(ts_file)	# only segments that really downloaded
			logger.info("Files number： %s"%len(ts_list))
			if not ts_list:
				return None
			# drop a stale output file before appending
			file_name = '%s.%s'%(name,ftype)
			download_file = os.path.join(self.cacle_dir,file_name)
			if os.path.exists(download_file):
				os.remove(download_file)
			sprytor = self._make_decryptor(key_file)
			with open(download_file,'ab+') as video:
				for ts_file in ts_list:
					self._append_segment(video,ts_file,sprytor)
			return file_name
		else: 	# merge into one file per clip
			origin_abspath = path
			filenames = []	# (video_name, ftype, "Cutfolder") per merged clip
			cutfolder = os.path.join(self.cacle_dir,"Cutfolder")
			if not os.path.exists(cutfolder):
				os.makedirs(cutfolder,exist_ok=True)
			sprytor = self._make_decryptor(os.path.join(origin_abspath,keyname)) if keyname else None
			n = 0
			for ts_list in data:
				# pick the first unused <name>_<n>.mp4 slot
				video_name = name+'_'+str(n)+'.'+ftype
				video_path = os.path.join(cutfolder,video_name)
				while os.path.exists(video_path):
					n += 1
					video_name = name+'_'+str(n)+'.'+ftype
					video_path = os.path.join(cutfolder,video_name)
				n += 1
				with open(video_path,'ab+') as video:
					for ts in ts_list:
						ts_path = os.path.join(origin_abspath,ts)
						if os.path.exists(ts_path):
							self._append_segment(video,ts_path,sprytor)
				filenames.append((video_name,ftype,"Cutfolder"))
			return filenames

	def _make_decryptor(self,key_path):
		"""Return an AES-CBC cipher read from *key_path*, or None when the
		file is missing.  The key bytes double as the IV — this matches how
		the streams were produced elsewhere in this project."""
		if key_path and os.path.exists(key_path):
			with open(key_path,'rb') as kf:
				key_rb = kf.read()
				return AES.new(key_rb,AES.MODE_CBC, IV=key_rb)
		return None

	def _append_segment(self,video,ts_path,sprytor):
		"""Append one ts segment to the open *video* file, decrypting it with
		*sprytor* (a stateful CBC cipher shared across segments) when set."""
		with open(ts_path,'rb') as f:
			chunk = f.read()
		if sprytor:
			# NOTE(review): pads with ASCII '0' (0x30), not NUL bytes — kept
			# as-is since it matches the original behavior; confirm before changing
			while len(chunk) % 16 != 0:
				chunk += b"0"
			chunk = sprytor.decrypt(chunk)
		video.write(chunk)

	def convert_movie(self,movies):
		"""Merge several downloaded movies into one "collection" (合集) entry.

		When one of *movies* is already a collection row (cuttype == 3) it is
		reused as the merge target; otherwise a new folder/name is derived from
		the first ordinary video.  Every other movie's files are copied in via
		``copy_movie`` and its source row deleted.

		:param movies: iterable of movie-id strings
		:return: '' on success, otherwise a human-readable error message
		NOTE(review): ids and names are interpolated directly into SQL text —
		safe only while *movies* originates from trusted internal callers.
		"""
		path = None
		id = None	# id of an existing collection row, if any (shadows builtin id)
		root_path = self.cacle_dir
		authors = []
		msg = ''
		try:
			query = "select * from movie where id in (%s)"%(','.join(movies))
			videos = self.db.query(query)
		except Exception as e:
			return '读取数据库失败'
		# Pick the target directory/name.
		for video in videos:
			cuttype = video['cuttype']
			if path==None and int(cuttype) !=3:
				# first ordinary video seen: derive a brand-new collection name
				id = None
				if video.author:
					path = video.author+'_合集'
					name = video.author + '_合集'
				else:
					path = video.path+'_合集'
					name = video.name + '_合集'
				ftype = video.ftype
				logo = video.logo
				indexname = ''
				area=video.area
				root_path = video.root_path
			if int(cuttype)==3:
				# an existing collection wins as the merge target (first one only)
				if id != None:
					continue
				id = video.id
				path = video.path
				ftype = video.ftype
				logo = video.logo
				name = video.name
				indexname = video.indexname
				area = video.area
				root_path = video.root_path
			authors.append(video.author)
		# Ensure the target directory exists.
		abs_path = os.path.join(root_path,path)
		if not os.path.exists(abs_path):
			os.makedirs(abs_path,exist_ok=True)

		section = str(round(time.time() * 1000))	# millisecond timestamp used as the new row id

		# Copy every other movie's files into the target, collecting index names.
		indexnames = ''
		indexnames += ' %s'%indexname
		for video in videos:
			try:
				err_name = video.name
				if id != video.id:
					indexname = self.copy_movie(video.id,section,path,root_path)
					if indexname != None and indexname != False:
						indexnames += ' %s'%indexname
						self.deletemovie(video.id)	# source row/files no longer needed
			except:
				# NOTE(review): bare except hides the real copy error; only the
				# movie name is reported back to the user
				msg += ' %s'%err_name
		if len(msg)>0:
			msg = '以下文件处理失败：%s'%msg
		indexnames = indexnames.strip()
		# Persist the merge result to the database.
		if indexnames:
			try:
				if id:
					sqlstr = 'update movie set indexname="{indexname}" where id="{id}"'.format(indexname=indexnames,id=id)
					self.db.execute(sqlstr)
				else:
					addtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
					author = '/'.join(authors)
					ftype = 'm3u8'
					sqlstr = 'insert into movie (id,name,author,logo,cuttype,ftype,indexname,path,root_path,area,addtime) \
						values ("%s","%s","%s","%s",3,"%s","%s","%s","%s",%d,"%s");' % (section,name,author,logo,ftype,indexnames,path,root_path,area,addtime)
					self.db.execute(sqlstr)
			except Exception as e:
				msg += ' 写入数据库失败：%s'%str(e)
		return msg


	def copy_movie(self,id,section,path,root_path):
		"""Copy one movie's media files into the collection folder and rewrite
		its m3u8 index files to point at the copies.

		Copied files are renamed ``<prefix>_<file>`` where *prefix* is the
		first letter (from 'a') not yet used in the target folder, so repeated
		merges do not collide.

		:param id: source movie id
		:param section: collection row id, used in the rewritten /hls/ URLs
		:param path: collection folder name relative to *root_path*
		:param root_path: base directory holding both folders
		:return: space-joined new index-file names; ``None`` when the movie row
			does not exist; ``False`` when copying/rewriting failed
		"""
		movie = self.db.get('select * from movie where id="%s"'%id)
		if not movie:
			return None
		indexfiles = set()
		indexfiles.update(movie.indexname.split(' '))
		indexnames = []
		# Find an unused one-letter prefix in the collection folder.
		# NOTE(review): after 26 merges this walks past 'z' into punctuation
		# characters (chr(123)...) — confirm that is acceptable.
		start_char = 97
		heji_abspath = os.path.join(root_path,path)
		for root,dirs,files in os.walk(heji_abspath):
			for i in range(100):
				for file in files:
					if file.startswith(chr(start_char)+"_"):
						break	# prefix already taken — try the next letter
				else:
					break	# no file uses this prefix — keep it
				start_char += 1
		start_char = chr(start_char)

		# Copy the media files (prefixed) and rewrite each index file.
		origin_abspath = os.path.join(root_path,movie['path'])
		try:
			for root,dris,files in os.walk(origin_abspath):
				for file in files:
					if file.endswith('.m3u8'):	# index files are rewritten below, not copied
						indexfiles.add(file)
					else:
						# NOTE(review): the join uses origin_abspath even for
						# files found in sub-directories — assumes a flat folder
						name = start_char+'_'+file
						copyfile(os.path.join(origin_abspath,file), os.path.join(heji_abspath,name))
			for file in movie.indexname.split(' '):
				if file=='':
					continue
				name,ex = file.split('.')
				name = name+'_'+start_char+'.'+ex
				indexnames.append(name)
				with open(os.path.join(origin_abspath,file),'r',encoding='utf8') as f:
					data = f.readlines()
					with open(os.path.join(heji_abspath,name), 'w+',encoding='utf8') as new_f:	# the rewritten index file
						for line in data:
							if line.startswith("#"):
								if "URI=" in line:
									# repoint the encryption-key URI at the prefixed copy
									key = re.compile('URI="(.*?)"')
									origin_path = key.findall(line)[0]
									keys = origin_path.split('/')
									key_name = keys[-1]
									key_path = '/hls/%s/%s'%(section,path)
									key_name = start_char+'_'+key_name
									after_path = '/'.join([key_path,key_name])
									line = line.replace(origin_path,after_path)
								new_f.write("%s"%line)
							else:
								# repoint each ts entry at its prefixed copy
								ts = line.split('/')[-1]
								line = '/hls/'+section+'/'+path+'/'+start_char+'_'+ts
								new_f.write("%s"%line)
		except:
			return False
		return ' '.join(indexnames)




	async def convert_m3u(self,section):
		'''Merge the cached segments of *section* into one output file.

		NOTE(review): this coroutine looks stale — ``download_dir`` is never
		assigned before the ``os.path.join`` below (TypeError when ts_list is
		non-empty) and ``self.cfg`` is not initialised in the visible
		``__init__``; confirm whether this path is still reachable.
		'''
		download_dir = None	# NOTE(review): never set to a real directory before use below
		sections = []
		ftype = self.dt_progress[section]['type']
		name = self.dt_progress[section]['name']
		root = self.dt_progress[section]['root']
		path = self.dt_progress[section]['cacle_path']
		cacle_path = os.path.join(root,path)

		# m3u8: concatenate the ts chain, following 'next' links between sections
		if ftype == 'm3u8':
			# decide whether this section is part of an append chain
			try:
				prev = self.cfg.get(section,'prev')
				if int(prev):
					pass 	# a non-empty 'prev' means another section owns the merge
				else:
					while int(section):
						sections.append(section)
						section = self.cfg.get(section,'next')
			except:
				sections.append(section) 	# no chain info: merge just this section
			# collect every downloaded ts file of the chain
			ts_list = []
			for section in sections:
				cacle_path = self.cfg.get(section,'cacle_path')
				async with aiofiles.open(os.path.join(cacle_path,self.cfg.get(section,'name')),'r') as f:
					lines = await f.readlines()
					for line in lines:
						if line[0] != '#' and line[0] != '':
							ts_name = line.strip().split('/')[-1]
							ts_file = os.path.join(cacle_path,ts_name)
							if os.path.exists(ts_file) and os.path.getsize(ts_file)>0:
								ts_list.append(ts_file) # only segments that actually downloaded
			if ts_list:
				# remove a stale output file first
				download_file = os.path.join(download_dir,'%s.mp4'%sections[0])
				if os.path.exists(download_file):
					os.remove(download_file)
				try:
					async with aiofiles.open(download_file,'ab+') as video:  # concatenate segments
						for ts in ts_list:
							async with aiofiles.open(ts,'rb') as f:
								await video.write(await f.read()) 
				except Exception as e:
					raise
			else:
				download_file = None
		elif ftype == 'mp4':
			# mp4: chunks are named '<index>_...' — concatenate in index order
			name = name +'.' + ftype
			file_path = os.path.join(root,name)
			if os.path.exists(file_path):
				os.remove(file_path)
			try:
				async with aiofiles.open(file_path,'ab') as df:
					files = os.listdir(cacle_path)
					files_list = []
					for file in files:
						if file.endswith('.m3u8'):
							continue
						index = int(file.split('_')[0])
						files_list.append((file,index))
					for file,index in sorted(files_list,key=lambda x: x[1]):
						async with aiofiles.open(os.path.join(cacle_path,file),'rb') as f:
							await df.write(await f.read())
			except:
				pass

		# cache-directory cleanup (intentionally disabled)
		# for root,dirs,files in os.walk(cacle_path):
		# 	for file in files:
		# 		os.remove(os.path.join(root,file))
		# 	for d in dirs:
		# 		os.rmdir(os.path.join(root,d))
		# os.rmdir(cacle_path)
		return name



class IterObject():
    """A list-backed iterator whose data can be extended via add()/append().

    Items are handed out front-to-back.  ``getLen()`` resets the
    remaining-item counter to the FULL list length, so extending while a
    traversal is in progress replays the list from the start.
    NOTE(review): confirm replay-on-extend is the intended semantics.
    """

    def __init__(self, data=None):
        """:param data: initial items (a list); raises ValueError otherwise.

        bugfix: the default used to be a mutable literal ([]) shared by
        every instance constructed without arguments.
        """
        if data is None:
            data = []
        if not isinstance(data, list):
            raise ValueError
        self._data = data
        self._num = 0
        self.getLen()

    def __iter__(self):
        return self

    def __next__(self):
        # _num counts items still to deliver; index from the back so the
        # front-most undelivered item comes out first
        if self._num == 0:
            raise StopIteration
        self._num -= 1
        return self._data[-self._num - 1]

    def getLen(self):
        # reset the remaining counter to the whole backing list
        self._num = len(self._data)

    def add(self, _list):
        """Extend the backing list with *_list* (must itself be a list)."""
        if not isinstance(_list, list):
            raise ValueError
        self._data += _list
        self.getLen()

    def append(self, name):
        """Append a single item to the backing list."""
        self._data.append(name)
        self.getLen()
DOWNLOAD_NUM = 0  # demo: number of downloads currently "in flight" (see check())

def download(key,num):
    # NOTE(review): fetch() is a coroutine — calling it without await only
    # builds the coroutine object; its body never runs and `response` is not
    # a result.  Confirm whether this demo path is still used before fixing.
    response = fetch(key)
    global DOWNLOAD_NUM
    DOWNLOAD_NUM -= 1  # release the slot that check() claimed

async def fetch(key, delay=3):
    """Demo fetch stub: pause *delay* seconds instead of doing real HTTP.

    The tornado-based download body that used to live here was commented
    out; this stub keeps the call signature (plus an optional *delay*,
    defaulting to the historical 3 s, so callers and tests can shorten the
    pause) and returns None.

    :param key: download descriptor (currently unused)
    :param delay: seconds to sleep before returning
    """
    # bugfix: asyncio.sleep(3) was previously called without await, so the
    # coroutine returned immediately without ever sleeping
    await asyncio.sleep(delay)

async def create_task():
    """Demo driver: push a fixed batch of keys through the throttled downloader."""
    pending = IterObject([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    for item in pending:
        slot = await check()  # wait until fewer than 4 downloads are active
        download(item, slot)


async def check():
    """Wait for a free download slot (at most 4 concurrent), then claim it.

    :return: the value of DOWNLOAD_NUM after the slot was claimed
    """
    global DOWNLOAD_NUM
    while True:
        if DOWNLOAD_NUM < 4:
            DOWNLOAD_NUM += 1
            return DOWNLOAD_NUM
        # bugfix: asyncio.sleep(0.5) was not awaited, so this loop spun hot
        # and never yielded control back to the event loop
        await asyncio.sleep(0.5)

async def run():
    """Top-level entry coroutine for the demo download pipeline."""
    await create_task()

if __name__ == '__main__':

    # Demo entry point: drive run() to completion on tornado's IOLoop.
    tornado.ioloop.IOLoop.current().run_sync(run)