# -*- coding: utf-8 -*-
from datetime import datetime,timedelta
import requests
import sys,os,re,base64,json
from urllib.parse import quote,unquote
import pprint


def calcuTimeDivide(begin_time=None, end_time=None):
    """Print and return the elapsed whole seconds between two times.

    Times are 'HH.MM.SS' strings. Both arguments default to the first two
    command-line arguments, preserving the original CLI usage, e.g.:
        begin_time = '00.33.57'
        end_time = '00.37.40'

    Uses timedelta.seconds on the (possibly negative) difference, so a
    span that crosses midnight still yields the positive wrapped duration.
    Returns the number of seconds (also printed).
    """
    if begin_time is None:
        begin_time = sys.argv[1]
    if end_time is None:
        end_time = sys.argv[2]
    start_time = datetime.strptime(begin_time, '%H.%M.%S')
    stop_time = datetime.strptime(end_time, '%H.%M.%S')
    seconds = (stop_time - start_time).seconds
    print(seconds, 'seconds')
    return seconds
# Page skeleton for the video-index page produced by genHtmlForVideo():
# HTML_TOP opens the document, HTML_BOTTOM closes it, and the generated
# <a> tags for the video files are inserted between the two.
HTML_TOP = '''<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<meta charset="utf-8">
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>
'''
HTML_BOTTOM = '''
</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>'''
# Translation of the (verbatim-kept) note below: "put the generated html
# file into nginx's html directory; access the nginx server from the LAN
# to watch this machine's videos in a browser".
'''
生成html文件放在nginx目录html里
局域网内访问nginx服务，在浏览器内看本机视频
'''
def genHtmlForVideo(videoDir: str, htmlFile: str):
    """Generate an HTML index page linking every .mp4 in videoDir.

    Intended to be dropped into nginx's html directory so the videos can
    be browsed from other machines on the LAN via /video/<name> links.

    videoDir  directory scanned (flat listing, no recursion needed)
    htmlFile  destination path of the generated page
    """
    mp4_pattern = re.compile(r'.+\.mp4$')  # raw string: escape the dot properly
    a_tags = []
    for f in os.listdir(videoDir):
        if mp4_pattern.match(f):
            a_tags.append('<a href="/video/%s">%s</a><br/>' % (f, f) + '\n')
    html = HTML_TOP + ''.join(a_tags) + HTML_BOTTOM
    with open(htmlFile, 'w', encoding='utf-8') as h:
        h.write(html)
# Sample input for the commented-out "video_abstract" regex experiments
# in the __main__ section below.
temp = '"video_abstract":"fff"Hello Boys"fff"oooooooooooooooojjjjjj""fdsafd'
# Default request headers: a desktop-Chrome User-Agent (some hosts reject
# the python-requests default UA). Extra header lines kept for reference.
HEADER = {
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
    #,"sec-ch-ua": 'Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87'
    #,"Referer": "https://www.ixigua.com"
}

def downloadW(file_name, _url):
    """Stream-download _url into file_name, printing a progress line.

    Opens the file in 'ab' (append) mode, as the original did, so a
    re-run appends to a partial file rather than truncating it.
    """
    _response = requests.get(_url, headers=HEADER, stream=True)
    try:
        # content-length can be absent (chunked encoding): int(None) crashed
        # here before. Treat missing/zero as "size unknown".
        _content_size = int(_response.headers.get('content-length') or 0)
        _data_count = 0
        with open(file_name, 'ab') as _f:
            for chunk in _response.iter_content(chunk_size=40960):
                if chunk:
                    _f.write(chunk)
                    _data_count += len(chunk)
                    if _content_size > 0:  # avoid ZeroDivisionError
                        _now_rate = (_data_count / _content_size) * 100
                        print('\rloading...: %02d%% (%d/%d)' % (_now_rate, _data_count, _content_size), end=' ')
    finally:
        # with stream=True the response must be closed manually; do it even
        # if writing fails part-way through
        _response.close()

def downloadHtml(file_name, _url):
    """Fetch _url and save the decoded response body to file_name.

    Writes with an explicit utf-8 encoding: the original relied on the
    platform default, which raises UnicodeEncodeError for non-ASCII pages
    on e.g. Windows (cp936/cp1252).
    """
    _response = requests.get(_url, headers=HEADER)
    with open(file_name, 'w', encoding='utf-8') as _f:
        _f.write(_response.text)

from bs4 import BeautifulSoup
# Header/URL presets for the scraping experiments kept (commented out)
# below and in __main__. `headers1` adds Origin/Referer/Cookie for hosts
# that check them; `_url` is an intentionally blank placeholder.
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Referer':'https://www.youtube.com/',
}
url_str = 'https://www.ixigua.com/'
headers1={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Origin':url_str,
'Referer':url_str,
'Cookie':''
}
_url = ''
#res=requests.get(_url,headers=headers)
#soup=BeautifulSoup(res.text,'html.parser')
#print(soup)
# Bootstrap-styled fragments used by generateTablinkHtml():
#   ..._BLOCK_DIV          one card per domain, wrapping the link list
#   ..._BLOCK_A            one list-group row per bookmarked URL
#   A_ANCHOR_POINT_..._A   table-of-contents anchor with a URL-count badge
BROWSER_BOOKMARK_TEMPLATE_BLOCK_DIV = '''<div id="{domain_name}" class="card text-left" style="width:;">
  <div class="card-header">
    {domain_name}
  </div>
  <ul class="list-group list-group-flush">
{a_list}
  </ul>
</div><br>
'''
BROWSER_BOOKMARK_TEMPLATE_BLOCK_A = '''    <a class="list-group-item" target='_blank' href="{href}">{a_display_value}</a>'''
A_ANCHOR_POINT_TEMPLATE_BLOCK_A = '''<a class="nav-link" href="#{domain_name}">{href}<span class="badge bg-secondary">{sub_url_sum}</span></a>'''
# enumerate files in a directory
def seekDir(path):
    """Return the full paths of the regular files directly under path.

    Subdirectories are skipped (no recursion); each kept path is also
    printed, as in the original.

    BUG FIX: the original tested os.path.isdir(arch) on the bare entry
    name -- i.e. relative to the current working directory, not to
    `path` -- so subdirectories were usually misclassified as files.
    """
    archivesList = []
    for arch in os.listdir(path):
        full_path = os.path.join(path, arch)
        if os.path.isdir(full_path):
            continue
        print(full_path)
        archivesList.append(full_path)
    return archivesList

# build a bookmark-listing html page from browser bookmark exports
def generateTablinkHtml(markbookHtmlPath, tablinkHtmlTemplate, tablinkHtml):
    """Build a bookmark-listing web page grouped (and de-duplicated) by domain.

    markbookHtmlPath     directory of bookmark HTML files exported from a browser
    tablinkHtmlTemplate  template file with {&&...&&} placeholders
    tablinkHtml          destination HTML file

    Placeholders replaced in the template: {&&tab_link&&} (per-domain cards),
    {&&a_anchor_point&&} (TOC anchors), {&&domain_sum&&}, {&&url_sum&&}.
    """
    markbookFiles = seekDir(markbookHtmlPath)

    htmls = ''
    for file in markbookFiles:
        with open(file, 'r', encoding='utf-8') as f:
            htmls = htmls + f.read()
    # groups: 0 = href, 1 = optional ICON attribute, 2 = display title
    links = re.findall(r'<A HREF="(.*?)" ADD_DATE=".*?"( ICON=".*?"?){0,1}>(.*?)</A>', htmls)

    # {domain: {url: title, ...}} -- insertion-ordered, de-duplicated by URL
    linkListByDomainDict = {}
    for l in links:
        urlstr = l[0]
        displayName = l[2] or urlstr  # untitled bookmarks display their URL
        domains = re.findall(r'^(http[s]{0,1}://.*?/).*', urlstr)
        if not domains:
            # ROBUSTNESS FIX: skip non-http(s) bookmarks (chrome://, file://,
            # javascript:) -- the original raised IndexError on domainstr[0]
            continue
        linkListByDomainDict.setdefault(domains[0], {})[urlstr] = displayName

    # one card per domain, one <a> per de-duplicated URL
    tablink_div_element = ''
    for k, v in linkListByDomainDict.items():
        a_list_element = ''
        for _lk, _lv in v.items():
            a_list_element = a_list_element + '\n' + BROWSER_BOOKMARK_TEMPLATE_BLOCK_A.format(href=_lk, a_display_value=_lv)
        tablink_div_element = tablink_div_element + '\n' + BROWSER_BOOKMARK_TEMPLATE_BLOCK_DIV.format(domain_name=k, a_list=a_list_element)

    # table-of-contents anchors with per-domain URL counts
    anchorpoint_div_element = ''
    url_sum = 0
    for k, v in linkListByDomainDict.items():
        num = len(v)
        anchorpoint_div_element = anchorpoint_div_element + A_ANCHOR_POINT_TEMPLATE_BLOCK_A.format(domain_name=k, href=k, sub_url_sum=num)
        url_sum += num

    with open(tablinkHtmlTemplate, 'r', encoding='utf-8') as tabhtml:
        template = tabhtml.read()
    rendered = (template
                .replace('{&&tab_link&&}', tablink_div_element)
                .replace('{&&a_anchor_point&&}', anchorpoint_div_element)
                .replace('{&&domain_sum&&}', str(len(linkListByDomainDict)))
                .replace('{&&url_sum&&}', str(url_sum)))
    with open(tablinkHtml, 'w', encoding='utf-8') as tblink:
        tblink.write(rendered)

from downloadVideo import getVideoPageHtml
def generateFile(file_name:str,content:str):
    """Write content to file_name as UTF-8, replacing any existing file."""
    with open(file_name, mode='w', encoding='utf-8') as out_file:
        out_file.write(content)

# fetch and tidy up a YouTube video's captions
def fetchCaptionOfYoutube(html:str,languageType='en',filename=''):
    """Extract the caption-track URL from a YouTube watch-page HTML dump
    and hand it to genCaptionFileFromCaptionWebpage.

    html          full page source containing ytInitialPlayerResponse
    languageType  target caption language code ('en', 'zh-Hans', ...)
    filename      output basename; defaults to the sanitised video title
    """
    json_text_list = re.findall('<script nonce=".*?">var ytInitialPlayerResponse = (.*?);var', html)
    json_text = json_text_list[0] if json_text_list else '{}'
    info = json.loads(json_text)
    caption_url = info['captions']['playerCaptionsTracklistRenderer']['captionTracks'][0]['baseUrl']
    # BUG FIX: the original pattern '(lang=.*)&{0,1}' matched greedily to the
    # end of the URL, so the replace() dropped every query parameter after
    # lang=. Substitute only the lang value itself; the [?&] lookbehind also
    # keeps us from touching e.g. a 'tlang=' parameter.
    if languageType != 'en' and re.search(r'[?&]lang=[^&]*', caption_url):
        caption_url = re.sub(r'(?<=[?&])lang=[^&]*', 'lang=' + languageType, caption_url, count=1)
    if filename == '':
        filename = info['videoDetails']['title']
        filename = filename.replace(' ','_').replace('|','_')
    print(filename,'\n',caption_url) #lang=zh-Hans or lang=en
    genCaptionFileFromCaptionWebpage(caption_url,languageType,filename)

def genCaptionFileFromCaptionWebpage(_url:str,_language:str,_filename:str):
    """Download a YouTube timedtext XML page and write .srt and .txt files.

    Each <text start=".." dur=".."> element becomes one SRT cue. A cue
    ends where the next cue starts (avoids overlap); the last cue ends at
    start + dur. Output goes to ./caption_<filename>_<language>.srt/.txt.
    """
    transcript = getVideoPageHtml(_url).replace('&amp;#39;',"'")
    bsp = BeautifulSoup(transcript,'html.parser')
    text_list = bsp.find_all('text')
    srt_parts = []
    txt_parts = []
    total = len(text_list)
    for index in range(total):
        start_time = timedelta(seconds=float(text_list[index]['start']))
        end_time = start_time + timedelta(seconds=float(text_list[index]['dur']))
        if index + 1 < total:
            end_time = timedelta(seconds=float(text_list[index + 1]['start']))
        # ROBUSTNESS FIX: .string is None for empty or nested <text> nodes;
        # the original crashed concatenating None
        line = text_list[index].string or ''
        srt_parts.append('%d\n%s --> %s\n%s' % (index + 1, formatTime(start_time), formatTime(end_time), line + '\n' * 3))
        txt_parts.append(line + '\n')
    generateFile('./caption_%s_%s.srt' % (_filename,_language), ''.join(srt_parts))
    generateFile('./caption_%s_%s.txt' % (_filename,_language), ''.join(txt_parts))

def genCaptionFileFromCaptionXML(_xmlfilename:str,_filename:str,_language='en'):
    """Convert a saved timedtext XML caption file to .srt and .txt files.

    Same cue logic as genCaptionFileFromCaptionWebpage: a cue ends where
    the next one starts (when there is one), otherwise at start + dur.
    Output goes to ./caption_<filename>_<language>.srt/.txt.
    """
    with open(_xmlfilename,'r',encoding="utf-8") as f:
        transcript = f.read()
    bsp = BeautifulSoup(transcript,'html.parser')
    text_list = bsp.find_all('text')
    srt_parts = []
    txt_parts = []
    total = len(text_list)
    for index in range(total):
        start_time = timedelta(seconds=float(text_list[index]['start']))
        end_time = start_time + timedelta(seconds=float(text_list[index]['dur']))
        if index + 1 < total:
            end_time = timedelta(seconds=float(text_list[index + 1]['start']))
        # ROBUSTNESS FIX: .string is None for empty or nested <text> nodes;
        # the original crashed concatenating None
        line = text_list[index].string or ''
        srt_parts.append('%d\n%s --> %s\n%s' % (index + 1, formatTime(start_time), formatTime(end_time), line + '\n' * 3))
        txt_parts.append(line + '\n')
    generateFile('./caption_%s_%s.srt' % (_filename,_language), ''.join(srt_parts))
    generateFile('./caption_%s_%s.txt' % (_filename,_language), ''.join(txt_parts))

# format a timedelta as an SRT timestamp
def formatTime(time_obj:object):
    """Convert a timedelta to an SRT timestamp 'HH:MM:SS,mmm'.

    str(timedelta) yields e.g. '0:00:01.500000'; this pads the hour to
    two digits and truncates the 6-digit microsecond suffix to ',mmm'
    milliseconds (or appends ',000' when there is no fraction).
    The original's trailing `return result` was unreachable and removed.
    NOTE(review): deltas of a day or more ('1 day, 0:00:01') are not
    handled -- assumed out of range for caption timestamps.
    """
    result = str(time_obj)
    parts = result.split(':')
    if len(parts) > 1 and len(parts[0]) < 2:
        result = '0' + result
    if '.' in result:
        # drop the last 3 microsecond digits, swap '.' for the SRT comma
        return result[:-3].replace('.', ',')
    return result + ',000'

# local scratch directories (Windows-style paths) used by excelToMarkdown
# and the commented-out calls in __main__
video_dir = 'c:/e/temp/video/'
pic_dir = 'c:/e/temp/pic/'

import pandas as pd

def excelToMarkdown(excelFile, mdFileName):
    """Convert the first sheet of an Excel file to a Markdown table.

    excelFile   path of the workbook to read (first sheet only)
    mdFileName  output filename, written under the module-level pic_dir

    NOTE(review): like the original, the separator row is '-|' per column
    with no leading pipe, matching the pipe-less header row; some strict
    Markdown renderers may prefer '|---|...' -- confirm before changing.
    """
    excel = pd.read_excel(excelFile)
    excel_table_head = list(excel.columns.values)
    table_head = '|'.join(excel_table_head) + "\n"
    # stringify every cell, join cells with '|' and rows with newlines
    table_body = '\n'.join(
        '|'.join(str(cell) for cell in row)
        for row in excel.iloc[0:].values
    )
    table_split = '-|' * len(excel_table_head) + "\n"
    table = table_head + table_split + table_body
    with open(os.path.join(pic_dir, mdFileName), "w", encoding="UTF-8") as f:
        f.write(table)



if __name__ == '__main__':
    # Scratch driver: earlier experiments are kept below as commented-out
    # code. The active path reads an ixigua.com video-page HTML dump from
    # ./content.txt, extracts the URL-encoded RENDER_DATA JSON, and appends
    # the video title / cover URL / stream URLs to ./unquote.txt.
    #calcuTimeDivide()
    #video_dir = 'c:/e/temp/video'
    #html_file = 'C:/D/tools/nginx-1.20.2/html/video.html'
    #genHtmlForVideo(video_dir,html_file)
    #main_url = '=='
    #with open('icon.png','wb') as p:
    #    p.write(base64.b64decode(main_url))
    #print(base64.b64decode(main_url).decode('utf-8'))
    #raise ValueError("代码抛了一个异常")
    #print(re.sub('\"video_abstract\":\".*\"','\"video_abstract\":\"\"',temp))
    #print(re.findall('\"video_abstract\":\".*?(\".*?\").*?\"',temp))
    #downloadW('./Motorsport News – October 20 2022.pdf','https://')
    #res=requests.get(url_str,headers=headers)
    #soup=BeautifulSoup(res.text,'html.parser')
    #with open('./temp.html','w') as f:
    #    f.write(res.text)
    #rawurl='%7B%22%7D%7D%7D'
    #url=unquote(rawurl,'utf-8')
    #pprint.pprint(url)
    #generateTablinkHtml('C:/E/temp/pic/','C:/E/codes/python/mydjango/website/templates/tablink_temp.html','C:/E/codes/python/mydjango/website/templates/tablink.html')
    #generateTablinkHtml('C:/E/temp/pic/','C:/E/work_myself/python_study/my-django/website/templates/tablink_temp.html','C:/E/work_myself/python_study/my-django/website/templates/tablink.html')
    temp = ""
    #_url = base64.b64decode(temp)
    #with open('./unquote.txt','a',encoding="utf-8") as f:
    #    temp = '-'*100 + '\n' + _url.decode('utf-8') + '\n' + '-'*100 + '\n\n'
    #    f.write(temp)
    #    f.flush()
    #exit()
    # page dump saved manually beforehand
    with open('./content.txt','r',encoding="utf-8") as f:
        temp = f.read()
    #filename = ''
    #if len(sys.argv) > 1 :
    #    filename = sys.argv[1]
    #fetchCaptionOfYoutube(temp,'en',filename)
    #fetchCaptionOfYoutube(temp,'zh-Hans',filename)
    #excelToMarkdown(os.path.join(pic_dir,'lln_excel_subs_2023-3-10_5837146.xlsx'),'newfile.md')
    #genCaptionFileFromCaptionXML('./beachjsonsrt.xml','beach.srt')
    #exit()
    with open('./unquote.txt','a',encoding="utf-8") as f:
        text_json = '-'*100 + '\n'
        # the page embeds its state URL-encoded inside a RENDER_DATA script tag
        infojson = re.findall('<script id="RENDER_DATA" type="application/json">(.*?)</script>',temp)
        infojson = unquote(infojson[0]) if len(infojson)>0 else '{}'
        if infojson == '{}':
            print('not find jsoninfo!')
            exit()
        #--- debug code begin ---#
        #f.write(infojson)
        #f.flush()
        #exit()
        #--- debug code end   ---#
        jsonobj = json.loads(infojson)
        video_title = jsonobj['data']['initialVideo']['title']
        text_json += video_title + '\n'
        text_json += 'cover img:%s' % jsonobj['data']['initialVideo']['coverUrl'] + '\n'
        print(video_title)
        video_info = ''
        # plain (muxed) streams: one definition + URL per entry
        if 'video_list' in jsonobj['data']['initialVideo']['videoPlayInfo']:
            video_info = jsonobj['data']['initialVideo']['videoPlayInfo']['video_list']
            for v in video_info:
                # NOTE(review): v_txt is set to '\n' and then immediately
                # reassigned, so the separator newline is lost -- the second
                # statement was probably meant to be '+='. TODO confirm.
                v_txt = '\n'
                v_txt = v['video_meta']['definition'] + '\n'
                v_txt += v['main_url'] + '\n'
                print(v['video_meta']['definition'])
                text_json += v_txt
        # split streams: separate video and audio track lists
        if 'dynamic_video' in jsonobj['data']['initialVideo']['videoPlayInfo']:
            if 'dynamic_video_list' in jsonobj['data']['initialVideo']['videoPlayInfo']['dynamic_video']:
                video_info = jsonobj['data']['initialVideo']['videoPlayInfo']['dynamic_video']['dynamic_video_list']
                audio_info = jsonobj['data']['initialVideo']['videoPlayInfo']['dynamic_video']['dynamic_audio_list']
                for v in video_info:
                    # NOTE(review): same lost-separator pattern as above
                    v_txt = '\n'
                    v_txt = v['video_meta']['definition'] + '\n'
                    v_txt += v['main_url'] + '\n'
                    print(v['video_meta']['definition'])
                    text_json += v_txt
                for v in audio_info:
                    v_txt = '\n'
                    v_txt = str(v['audio_meta']['real_bitrate']) + '\n'
                    v_txt += v['main_url'] + '\n'
                    print(v['audio_meta']['real_bitrate'])
                    text_json += v_txt
        text_json += '-'*100 + '\n\n'
        f.write(text_json)
        f.flush()