from tornado.queues import Queue
from tornado.gen import multi
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient,HTTPClient
from tornado.httputil import HTTPHeaders
from multiprocessing import Queue as mQueue
from urllib.parse import urlparse,urlsplit
from config import appconfig
from utils import ProStatus
from logger import logger
import time
import datetime
import asyncio
import nest_asyncio
nest_asyncio.apply()
import os
import re
import sys
import aiofiles
import argparse
import database

# Command-line interface.
# Fix: the usage string used to advertise a "-t TXT" option that does not exist
# and omitted the real -f/--ftype and -c/--continueto options defined below.
parser = argparse.ArgumentParser(usage='Download.py [-m MAX] [-n NAME] [-u URL] [-p PATH] [-f FTYPE] [-c CONTINUETO]',description='注意：URL和TXT必须设置一个，如果都设置默认使用URL')
parser.add_argument('-n','--name',type=str,help="文件名称。如果不设置，则从链接中获取文件名称")
parser.add_argument('-m','--max',type=int,default=8,help='同时下载最大任务数，默认8任务')
parser.add_argument('-u','--url',type=str,help='下载链接')
parser.add_argument('-p','--path',type=str,help='文件保存路径。如果不设置，默认保存在~/Downloads目录下')
parser.add_argument('-f','--ftype',type=str,help="下载的文件类型")
parser.add_argument('-c','--continueto',type=str,help="重新下载文件，为文件缓存路径，此时若传参name将无效")
args = parser.parse_args()  # NOTE: runs at import time, so importing this module parses sys.argv

# Default HTTP request headers attached to every fetch (see Base.__init__).
Headers = {
	'User-Agent': 'YouXinZhengQuan/8.5.0 (iPhone; iOS 15.1.1; Scale/3.00)'
}

class Base(object):
    """Common configuration shared by every downloader kind (TXT/M3U8/FILE/Download).

    Keyword options (all optional):
        max        -- maximum number of concurrent download tasks (default 8)
        name       -- target file name; derived from the URL when omitted
        url        -- download link
        path       -- directory the file is saved under
        continueto -- path of a cached txt index to resume from
        ftype      -- download type tag (default 'file')
        queue      -- queue that receives this job's download URLs
    """

    def __init__(self, **kwargs):
        # Plain pass-through options default to None when absent.
        for attr in ('name', 'url', 'path', 'continueto', 'queue'):
            setattr(self, attr, kwargs.get(attr))
        # Options with explicit defaults.
        self.max = int(kwargs.get('max', 8))
        self.file_type = kwargs.get('ftype', 'file')

        # Fallback save location from the application configuration.
        self.root_dir = appconfig.get('video_path')

        # Shared async HTTP client plus the default request headers.
        self.http = AsyncHTTPClient()
        self.HEADERS = HTTPHeaders()
        self.HEADERS.update(Headers)

class TXT(Base):
    """Resumes a download from a previously written ``index.txt`` URL list.

    Each non-empty line of the index file is a fully resolved segment URL;
    entries whose target file already exists under ``self.path`` are skipped.
    """

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.urls = []  # URLs still pending download

    async def read(self):
        """Parse ``self.path/self.name``, queue each missing file's URL, and
        return the list of queued URLs.

        Raises:
            FileNotFoundError: when the index file does not exist.
        """
        filepath = os.path.join(self.path,self.name)
        if not os.path.exists(filepath):
            # Bug fix: the original raised FileExistsError here, which signals
            # the *opposite* condition ("file already exists"); a missing file
            # is FileNotFoundError.
            raise FileNotFoundError('文件index.txt不存在')

        async with aiofiles.open(filepath,'r') as fd:
            lines = await fd.readlines()
            for line in lines:
                url = line.replace('\n','')
                if len(url)>0:
                    file = os.path.split(urlparse(url).path)[-1]
                    file = os.path.join(self.path,file)
                    if os.path.exists(file):    # already downloaded -- skip
                        continue
                    # Await the put so the tornado Future is not silently
                    # dropped (consistent with the awaited puts in
                    # Download.check()).
                    await self.queue.put(url)
                    self.urls.append(url)
        logger.info('解析成功，下载任务数：%s'%len(self.urls))
        return self.urls

class M3U8(Base):
    """Downloads an .m3u8 playlist at ``self.url``, resolves every segment/key
    entry to an absolute URL, writes the list to ``index.txt`` for resume
    support, and feeds the URLs into ``self.queue``.
    """

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.urls = []  # fully resolved ts/key URLs extracted from the playlist

    async def downloadUrl(self,url):
        """Fetch *url* (up to 3 attempts), write the body to a file named after
        ``self.url``'s basename under ``self.path``, and return the raw bytes.

        Returns ``None`` when all three attempts fail.
        NOTE(review): the on-disk filename is always derived from ``self.url``,
        even when *url* is a nested playlist passed in by checkUrl() -- so a
        nested fetch overwrites the same index file. Presumably intentional;
        confirm.
        """
        filename = urlparse(self.url).path.split('/')[-1]
        filepath = os.path.join(self.path,filename)
        if os.path.exists(filepath):
            os.remove(filepath)  # start fresh: the write below uses append mode
        for i in range(3):
            logger.info('开始下载链接')
            response = await self.http.fetch(url,headers=self.HEADERS,validate_cert=False,raise_error=False,request_timeout=30)
            if response==None:
                msg = '请求url失败'
            elif response.code in (200,206):
                if len(response.body)>0:
                    async with aiofiles.open(filepath,'ab') as fd:
                        data = response.body
                        await fd.write(data)
                    return data
                else:
                    msg = '读取到0个字节'
            else:
                msg = ' http_code：%s'%response.code
            logger.info(msg)
        else:
            # for/else: reached only when all retries were consumed without
            # returning, i.e. three consecutive failures.
            logger.info('链接下载失败3次')
            return

    async def checkUrl(self):
        """Download and parse the playlist; returns the list of resolved URLs,
        or ``None`` on any fetch/format failure.

        Resolution rules per entry:
          * bare file name            -> scheme://netloc + playlist dir + name
          * absolute URL              -> kept as-is
          * path starting with '/'    -> scheme://netloc + path
        A trailing ``index.m3u8`` entry means the playlist is nested and must
        be fetched again before the ts lines can be read.
        """
        parsed_result = urlparse(self.url)
        filename = parsed_result.path.split('/')[-1]
        filepath = os.path.join(self.path,filename)
        scheme = parsed_result.scheme
        netloc = parsed_result.netloc
        netpath = os.path.split(parsed_result.path)[0]  # directory part of the playlist path
        data = await self.downloadUrl(self.url)
        if data==None:
            return
        urls = data.decode().split('\n')
        if not urls[0].startswith('#EXTM3U'):
            logger.info('源文件格式错误')
            return

        # Scan backwards for the last non-empty line; if it is not the
        # #EXT-X-ENDLIST marker, parse it to detect a nested playlist.
        for i in range(len(urls)-1,-1,-1):
            t_url = urls[i]
            p_rlt = ''
            if t_url!='':
                if not t_url.startswith('#EXT-X-ENDLIST'):
                    p_rlt = urlparse(t_url)
                break
            continue

        if p_rlt != '':
            if os.path.split(p_rlt.path)[-1] == 'index.m3u8':	# nested playlist: fetch again
                if not p_rlt.scheme:	# not a complete URL -- rebuild an absolute one
                    if not t_url.startswith('/'):
                        t_url = '/'+ t_url
                    url = scheme+'://'+netloc+netpath+t_url
            elif t_url.startswith('/'):		# last entry is not index.m3u8 but is host-relative
                url = scheme+'://'+netloc+t_url
            else:
                logger.info('未匹配上，检查源文件内容：%s'%t_url)
                return
            # NOTE(review): the absolute `url` rebuilt above is never used --
            # the fetch below passes the raw `t_url`, which can still be a
            # relative path. This looks like it should be `url`; confirm.
            data = await self.downloadUrl(t_url)
            if data==None:
                return
            urls = data.decode().split('\n')
            if not urls[0].startswith('#EXTM3U'):
                logger.info('源文件格式错误')
                return
        logger.info('成功下载链接')
        # Resolve every playlist line to an absolute URL.
        txt = ''
        for ts in urls:
            ts = ts.replace('\n','')
            if ts == '':
                continue
            if ts.startswith('#'):
                # Tag line: only #EXT-X-KEY-style lines carrying URI="..." need
                # downloading (the encryption key); everything else is skipped.
                if "URI=" in ts:
                    key = re.compile('URI="(.*?)"')
                    key = key.findall(ts)[0]
                    key = urlparse(key)
                    scheme_key=key.scheme
                    netloc_key = key.netloc
                    path = os.path.split(key.path)[0]
                    file = os.path.split(key.path)[1]
                    query = key.query
                    if os.path.exists(os.path.join(self.path,file)):
                        continue  # key already on disk
                    # Build the absolute key URL.
                    if not path:
                        base_url = scheme+'://'+netloc+netpath+'/'+file
                    elif scheme_key:
                        base_url = scheme_key+'://'+netloc_key+path+'/'+file
                    else:
                        base_url = scheme+"://"+netloc+path+'/'+file
                    if query:
                        base_url += '?'+query
                    self.urls.append(base_url)
                    txt += base_url+'\n'
                continue
            # Segment (ts) line: same three-way resolution as the key above.
            ts = urlparse(ts)
            scheme_ts = ts.scheme
            netloc_ts = ts.netloc
            path_ts = ts.path
            path = os.path.split(path_ts)[0]
            file = os.path.split(path_ts)[1]
            query = ts.query
            if os.path.exists(os.path.join(self.path,file)):
                continue  # segment already on disk
            # Build the absolute segment URL.
            if not path:	# bare file name
                base_url = scheme+'://'+netloc+netpath+'/'+file
            elif scheme_ts:	# already a complete URL
                base_url = scheme_ts+'://'+netloc_ts+ path+'/'+file
            else:   # host-relative path
                base_url = scheme+'://'+netloc+ path+'/'+file
            if query:
                base_url += '?'+query
            self.urls.append(base_url)
            txt += base_url+'\n'
        # Persist the resolved list so the job can be resumed via TXT/continueto.
        txtfile = os.path.join(self.path,'index.txt')
        if not os.path.exists(txtfile):
            async with aiofiles.open(txtfile,'ab') as fd:
                await fd.write(txt.encode(encoding='utf-8'))
        logger.info('链接解析完毕，开始下载，下载任务数：%s'%len(self.urls))
        for url in self.urls:
            # NOTE(review): tornado Queue.put returns a Future that is dropped
            # here (cf. the awaited puts in Download.check). With an unbounded
            # queue the item is still enqueued -- confirm.
            self.queue.put(url)
        return self.urls

class FILE(Base):
    """Single-file (non-m3u8) download job.

    Bug fix: the previous ``checkUrl`` was dead copy-paste from M3U8 -- the
    line assigning ``data`` was commented out (guaranteed NameError on first
    use), it called a ``downloadUrl`` method that FILE never defined, and it
    neither queued anything nor returned a list even though ``Download.run``
    consumes its return value as one. Replaced with a minimal working
    implementation matching the TXT/M3U8 contract: queue the URL unless the
    target file already exists, and return the list of queued URLs.
    """

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.urls = []  # URLs queued for download (0 or 1 entries)

    async def checkUrl(self):
        """Queue ``self.url`` for download unless the target file is already
        on disk; return the (possibly empty) list of queued URLs."""
        filename = os.path.split(urlparse(self.url).path)[-1]
        filepath = os.path.join(self.path, filename)
        if not os.path.exists(filepath):    # skip files already downloaded
            # Await the put so the tornado Future is not silently dropped.
            await self.queue.put(self.url)
            self.urls.append(self.url)
        logger.info('解析成功，下载任务数：%s' % len(self.urls))
        return self.urls

class Download(Base):
    """Orchestrates a download job: resolves the URL list via TXT/M3U8/FILE (or
    a pre-built ``dt`` list from a parent process), runs ``max`` concurrent
    fetch workers, tracks progress, and reports to an optional parent process
    via multiprocessing queues.
    """
    # Tornado IOLoop shared by all Download instances.
    loop = IOLoop.current()

    def __init__(self, **kwargs):
        """Resolve save path / file name / file type from kwargs, then start
        the job (this constructor blocks: it drives the event loop itself).

        Extra kwargs beyond Base's:
            section     -- job label used in logs and parent-process messages
            dt          -- pre-built list of segment dicts from the parent process
            proc_queue1 -- mp queue: child -> parent status reports
            proc_queue2 -- mp queue: parent -> child control commands
        """
        super().__init__(**kwargs)
        self.tasks_step = {}  # worker id -> progress step (0..3), used for safe cancellation
        self.section = kwargs.get('section')
        self.dt = kwargs.get('dt')
        self.index = None  # when set (code==2 command), only this segment index is fetched next
        if self.dt!=None:   # job came from the parent process with a prepared list
            self.file_type = 'dt'
        else:
            self.local = False
            if self.path==None: # fall back to the configured save path
                self.path = self.root_dir
            if self.url:
                filename = os.path.split(urlparse(self.url).path)[-1]
                if self.name:
                    self.path = os.path.join(self.path,self.name)   # file and m3u8 types first download into a folder named after the file
                else:
                    self.path = os.path.join(self.path,filename)
                self.name = filename
                if self.name.endswith('.m3u8'):
                    self.file_type = 'm3u8'
            if self.url==None and self.continueto:
                # Resume mode: continueto is either the cached index file or its directory.
                self.local = True
                if os.path.exists(self.continueto):
                    if os.path.isfile(self.continueto):
                        self.path,self.name = os.path.split(self.continueto)
                    else:
                        self.path = self.continueto
                        self.name = 'index.txt' # 'index.file'/'index.txt'
                    # file_type comes from the suffix: 'txt' is really an m3u8
                    # segment list, 'file' is any other file.
                    # NOTE(review): split('.')[1] breaks on names containing
                    # extra dots -- [-1] would be safer; confirm before changing.
                    self.file_type = self.name.split('.')[1]
                else:
                    # NOTE(review): logger.info with two positional args -- the
                    # path is passed as a %-arg with no placeholder, so it is
                    # not printed.
                    logger.info('文件或目录不存在：',self.continueto)
                    exit()



        if not os.path.exists(self.path):
            # NOTE(review): os.mkdir fails if parent directories are missing
            # (os.makedirs would not) -- confirm parents always exist here.
            os.mkdir(self.path)

        self.urls = []        # every URL belonging to this job
        self.ok_index = set()   # URLs successfully downloaded
        self.fail_index = set() # URLs currently in a failed/retry state

        self.q = Queue()        # per-task status reports, consumed by check()
        self.queue = Queue()    # download URLs, consumed by the fetch() workers
        self.proc_q:mQueue = kwargs.get('proc_queue1')         # child -> parent process
        self.proc_q2:mQueue = kwargs.get('proc_queue2')        # parent -> child process
        # run() completes synchronously here (it only spawns callbacks/tasks),
        # then the nest_asyncio-patched asyncio loop runs forever to keep the
        # spawned coroutines alive until check()/check_proc() call exit().
        Download.loop.run_sync(self.run)
        loop = asyncio.get_event_loop()
        loop.run_forever()
    async def fetch(self,id):
        """Worker coroutine *id*: pull items off self.queue, download each to
        self.path, and push a status dict onto self.q for every item.

        Accepts three item shapes: a dict (from a 'dt' job: base_url/query/
        file_name/index), a (url, filename) tuple, or a plain url string.
        self.tasks_step[id] tracks progress so check_proc() avoids cancelling
        mid-write (step 2 = response received, file write pending).
        """
        self.tasks_step[id] = 0
        try:
            async for dt in self.queue:
                self.tasks_step[id] = 1 # task received
                if dt == None or dt=='':  # None is handled out here: it is a shutdown signal, not a task
                    break
                if isinstance(dt,dict):
                    index = dt.get('index')
                    if self.index != None:
                        # A specific segment was requested (code==2 command):
                        # requeue everything else until we hit that index.
                        if self.index != index:
                            self.queue.put(dt)
                            continue
                        else:
                            self.index = None
                    url = dt.get('base_url')
                    if dt.get('query'):
                        url += '?'+ dt.get('query')
                    filename = dt.get('file_name')

                elif isinstance(dt,tuple):
                    url,filename=dt
                    index = 0
                else:
                    url = dt
                    filename = os.path.split(urlparse(dt).path)[1]
                    index = 0
                if url in self.fail_index:
                    if self.proc_q==None:
                        # NOTE(review): bug -- str + int: self.urls.index(url)
                        # returns an int, so this concatenation raises TypeError
                        # whenever a retry is logged. Should be str()/format.
                        logger.info('重新下载:'+self.urls.index(url))
                try:
                    filepath = os.path.join(self.path,filename)
                    if os.path.exists(filepath) and os.path.getsize(filepath)>2048: # >2k on disk counts as already downloaded
                        self.q.put({'code':0,'url':url,'status':True,'filename':filename,'msg':'文件已经存在','size':0})
                        continue

                    status = True
                    msg = '下载完成'
                    size=0
                    response = await self.http.fetch(url, headers=self.HEADERS, validate_cert=False, raise_error=False,
                                                     request_timeout=10)
                    self.tasks_step[id] = 2 # response received; file write pending
                    if response==None:
                        msg = '请求url失败'
                        status=False
                    elif response.code in (200,206):
                        size = len(response.body)
                        if size>0:
                            async with aiofiles.open(filepath,'ab') as fd:
                                data = response.body
                                await fd.write(data)
                        else:
                            status = False
                            msg = '读取到0个字节'
                    else:
                        status = False
                        msg = ' http_code==%s'%response.code
                except Exception as e:
                    msg = '{} 请求出错'.format(self.section)
                    status = False
                    if self.proc_q==None:
                        logger.info(e)
                # NOTE(review): tornado Queue.put Future dropped here (cf. the
                # awaited puts in check()); item is still enqueued on an
                # unbounded queue -- confirm.
                self.q.put({'code': 0, 'url': url, 'status': status, 'filename': filename, 'index': index, 'msg': msg, 'size': size})
                self.tasks_step[id] = 3 # single task finished (file saved)
        except asyncio.CancelledError:
            raise
        finally:
            pass

    async def check_proc(self):
        """Poll the parent-process control queue every 0.2s.

        code==1: pause -- cancel every worker task (waiting up to ~1s per task
        for any in-flight write to finish), ack the parent, and exit the
        process. code==2: restrict the next fetch to segment ``tsIndex``.
        """
        while True:
            if not self.proc_q2.empty():
                data=self.proc_q2.get_nowait()
                code = data.get('code')
                if code==1:
                    logger.info('{} 收到暂停信号'.format(self.section))
                    for task in asyncio.all_tasks():
                        if not task.done():
                            name = task.get_name()
                            if name in self.tasks_step:
                                for i in range(100):  # up to 100 attempts (~1s) per task
                                    if self.tasks_step[name]!=2:
                                        task.cancel()  # safe: not mid-download/write
                                        break
                                    else:
                                        await asyncio.sleep(0.01)
                    self.proc_q.put({'section': self.section, 'flag': 'stop'})
                    await asyncio.sleep(1)
                    logger.info('{} 下载暂停，退出进程。'.format(self.section))
                    exit()
                elif code==2:
                    tsIndex = data.get('tsIndex')
                    self.index = tsIndex
            await asyncio.sleep(0.2)


    async def check(self):
        """Consume per-task status reports; this coroutine finishing means the
        whole download is complete (or aborted via a code!=0 report)."""
        async for item in self.q:   # keys: code, url, status, filename, msg, size
            code = item.get('code',0)
            if code==0:
                status = item.get('status')
                url = item.get('url')
                filename = item.get('filename','')
                index = item.get('index',0)
                msg = item.get('msg','')
                size = item.get('size',0)
                if status==None:
                    continue
                elif status==False:
                    # Failed: remember it and requeue for retry.
                    self.fail_index.add(url)
                    await self.queue.put(url)
                else:
                    self.ok_index.add(url)
                    if url in self.fail_index:
                        self.fail_index.remove(url)
                pct = round(len(self.ok_index) / len(self.urls) * 100, 1)
                if self.proc_q==None:
                    logger.info('下载进度：{}/{}({}%), {}{}'.format(len(self.ok_index),len(self.urls),pct,filename,msg))
                else:
                    self.proc_q.put({'section':self.section,'flag':status,'num':len(self.ok_index),'size':size,'index':index})
                if len(self.ok_index)==len(self.urls):
                    # All done: wake every worker with a None shutdown signal.
                    for i in range(self.max):
                        await self.queue.put(None)
                    break   # breaking out ends this coroutine
            else:
                break
        # Reaching here means the process may exit.
        if self.proc_q == None:
            logger.info('下载完成:%s' % self.path)
        else:
            self.proc_q.put({'section': self.section, 'flag': 'end'})
        await asyncio.sleep(0.5)
        logger.info('{} 下载结束，退出进程。'.format(self.section))
        exit()

    async def run(self):
        """Entry coroutine: spawn the monitors, build self.urls via the
        type-specific resolver, and start ``max`` fetch workers."""
        Download.loop.spawn_callback(self.check)
        Download.loop.spawn_callback(self.check_proc)
        logger.info('%s 文件名：%s'%(self.section,self.name))
        if self.file_type=='dt':
            for dt in self.dt:
                url = dt.get('base_url')
                name = dt.get('file_name')
                filepath = os.path.join(self.path, name)
                if os.path.exists(filepath) and os.path.getsize(filepath) > 2048:  # >2k counts as already downloaded
                    continue    # skip files already on disk
                if dt.get('query'):
                    url += '?'+dt.get('query')
                self.queue.put(dt)
                self.urls.append(url)
        if self.file_type=='m3u8':
            # NOTE(review): checkUrl can return None on failure, which makes
            # len(self.urls) below raise TypeError -- confirm intended.
            self.urls = await M3U8(**{'url':self.url,"path":self.path,'queue':self.queue}).checkUrl()
        elif self.file_type=='txt':
            self.urls = await TXT(**{'name':self.name,'path':self.path,'queue':self.queue}).read()
        elif self.file_type=='file':
            if not self.local:  # online file
                self.urls = await FILE(**{'url':self.url,'path':self.path,'queue':self.queue}).checkUrl()
            else:       # local file
                pass

        if len(self.urls)==0:
            logger.info('下载任务数为0')
            self.q.put({'code': 1, 'flag': ''})
        else:
            if self.proc_q!=None:
                self.proc_q.put({'section':self.section,'flag':'total','num':len(self.urls)})
            if len(self.urls)<self.max:
                self.max = len(self.urls)
            logger.info('{} 同时下载任务数：{}'.format(self.section,self.max))
            for i in range(self.max):
                asyncio.create_task(self.fetch(str(i)),name=str(i))

def new_download(**options):
    """Run a blocking Download job built from *options*.

    A Ctrl-C from the user is absorbed silently instead of dumping a
    KeyboardInterrupt traceback.
    """
    try:
        Download(**options)
    except KeyboardInterrupt:
        pass

# Maps every accepted short/long flag name (without dashes) to the Download
# keyword it sets; unknown flags are dropped.
KEY_ALIASES = {
    'n': 'name', 'name': 'name',
    'm': 'max', 'max': 'max',
    'u': 'url', 'url': 'url',
    'p': 'path', 'path': 'path',
    'c': 'continueto', 'continueto': 'continueto',
    'f': 'ftype', 'ftype': 'ftype',
}


def parse_cmd_kwargs(tokens):
    """Turn whitespace-split interactive input into Download kwargs.

    Supports two forms:
      * flag style: ``-n NAME -u URL ...`` (long names also work, any number
        of leading dashes is stripped);
      * bare style: exactly two tokens, ``NAME URL`` or ``URL NAME`` -- the
        URL is whichever token starts with ``http``; used only when no
        recognised flag produced a value.

    Returns a (possibly empty) dict.
    """
    data = {}
    key = ''
    for token in tokens:
        if token.startswith('-'):
            key = token.replace('-', '')
        elif key:
            # Only the first value after a flag is kept; extras are ignored.
            data[key] = token
            key = ''
    kwargs = {KEY_ALIASES[k]: v for k, v in data.items() if k in KEY_ALIASES}
    if not kwargs and len(tokens) == 2:
        # No recognised flags: treat the two tokens as name/url in either order.
        if tokens[0].startswith('http'):
            kwargs['url'], kwargs['name'] = tokens
        else:
            kwargs['name'], kwargs['url'] = tokens
    return kwargs


if __name__ == '__main__':
    # Command-line mode. Bug fix: the parsed argparse values were previously
    # never used -- even with -u/-c given the script fell through to the
    # interactive prompt. Now a job given on the command line runs directly.
    if args.url is not None or args.continueto is not None:
        cli_kwargs = {k: v for k, v in vars(args).items() if v is not None}
        new_download(**cli_kwargs)
    else:
        parser.print_help()
        # Interactive (double-click) mode: keep prompting until an empty line.
        while True:
            res = input('请输入名称+url：')
            if res == '':
                exit()
            kwargs = parse_cmd_kwargs(res.strip().split())
            Download(**kwargs)