#-*- coding: utf-8 -*-

'''
from urllib import request
def getHtmlContentFromUrl(_url):
_response = request.urlopen(_url)
return _response.read().decode('utf-8')
########################################
'''

import requests,re,json,time,os,base64
from openpyxl import Workbook as opwb,load_workbook
from openpyxl.styles import PatternFill,Font
from bs4 import BeautifulSoup

def getContentFromFile(_file):
    """Read a UTF-8 text file and return its full content as a str.

    :param _file: path of the file to read
    :return: the file's entire text content
    """
    # 'with' guarantees the handle is closed even if read() raises,
    # unlike the original open/read/close sequence.
    with open(_file, 'r', encoding="utf-8") as _o:
        return _o.read()

def getPattern(_express):
    """Compile the regular-expression text *_express* into a pattern object."""
    # re.compile caches internally, so repeated calls with the same text are cheap.
    return re.compile(_express)

def getListFromContentTextByPattern(_express,_content):
    """Return every non-overlapping match of *_express* found in *_content*.

    :param _express: regex text (capture groups yield tuples per findall)
    :param _content: the text to scan
    """
    # Inlined the one-line getPattern helper; re.compile does the same work.
    compiled = re.compile(_express)
    return compiled.findall(_content)

# Default request headers used by getHtmlContentFromUrlByRquests to mimic a
# desktop Chrome browser.
HEADERS = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
          ,'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' # content types the client can accept
          ,'Accept-Language': 'en-US,en;q=0.5' # languages the client prefers
          ,'Connection': 'keep-alive' # ask for a persistent connection
          }

def getHtmlContentFromUrlByRquests(_url):
    """Fetch *_url* with the shared HEADERS and return the response body as text."""
    return requests.get(_url, headers=HEADERS).text

# Youku playlist anchors: captures (href, title) per video link.
YOUKU_VIDEO_NO_TABLE = '<a href="(.+?)".+?target="video" title="(.+?)">'
# Bilibili video-table anchors: captures (href, title, length) per entry.
BILIBILI_VIDEO_TABLE = '<a href="(.+?)" target="_blank" class="cover"><img.+?alt="(.+?)"><span class="length">(.+?)</span>.+?</a>'
# Bilibili inline JSON objects: captures (title, length, bvid) per video record.
BILIBILI_VIDEOS_JSON = '{"comment":.+?"title":"(.+?)".+?"length":"(.+?)".+?"bvid":"(.+?)".*?}'

#----------fetch bilibili playlist begin------------------------#
def getBilibiliUpperPlaylist():
    """Scrape a bilibili uploader's playlist pages and print one line per
    video (title, length, link), then a total count.

    NOTE(review): scaffolding only — _html_url is blank, and '' % index
    raises TypeError, so the URL template must be filled in before use.
    """
    videos = []
    _html_url = ''# playlist url template (expects one page-number placeholder)
    for index in range(1,2):
        _url_content = getHtmlContentFromUrlByRquests(_html_url % index)
        videos.extend(getListFromContentTextByPattern(BILIBILI_VIDEOS_JSON,_url_content))
    #print('total:%d' % len(videos))
    for _v in videos:
        #print('%s\t%s' % (_v[1],_v[0]))
        print('%s\t%s\thttps://www.bilibili.com/video/%s' % (_v[0],_v[1],_v[2]))
    #_url_content = getContentFromFile('./alibaba.html')
    # NOTE(review): relies on _url_content leaking out of the loop above;
    # raises NameError if the loop body never ran.
    videos.extend(getListFromContentTextByPattern(BILIBILI_VIDEO_TABLE,_url_content))
    #for _v in videos:
    #    print('%s\t%s\t%s' % (_v[1],_v[2],_v[0]))
    print('total:%d' % len(videos))
#----------fetch bilibili playlist end  ------------------------#


#----------fetch my bilibili favorite begin------------------------#
    # NOTE(review): the indented string below is still part of this function's
    # body (a no-op expression); it documents the upperDict structure used by
    # the favorite/waitlist fetchers further down.
    '''
    dict-python-structure
    upper:
        mid:
        name:
        face:
    videos:(dict-list)
    [{
            bvid:
            title:
            cover:
            pubtime:
    },...
    ]
    
    upperDict-structure
    {mid:videosByUpperDict}
    '''
def getMyBiliFavoriteList(existBvidDict):
    """Page through my bilibili favorites API, skip already-recorded videos,
    group the rest by uploader, and append the rows to myfavoritelist.xlsx.

    :param existBvidDict: {bvid: count} of videos already persisted; any
        truthy entry causes the media to be skipped
    NOTE(review): the page loop currently iterates over [] (the real range
    is commented out), so nothing is fetched until it is restored.
    """
    _html_url = 'https://api.bilibili.com/x/v3/fav/resource/list?media_id=471491601&pn=%d&ps=20&keyword=&order=mtime&type=0&tid=0&platform=web'
    upperDict = {}  # {mid: {'name','face','mid','videos':[...]}}
    videoList = []  # flat rows for genXlsx, ordered per xlsx_table_header
    temp_file = 'bilibili.json'
    video_sum = 0
    for index in []:#range(1,1):
        _url_content = getHtmlContentFromUrlByRquests(_html_url % index)
        #with open(temp_file,'w',encoding='utf-8') as jsfile :
        #    jsfile.write(_url_content)
        #exit()
        #_url_content = getContentFromFile(temp_file)
        favoriteJson = json.loads(_url_content)
        if 'data' in favoriteJson and 'medias' in favoriteJson['data']:
            for media in favoriteJson['data']['medias']:
                video_sum += 1
                # 12 '-' placeholders, one per xlsx_table_header column.
                videoInfoList = ['-' for x in range(0,12)]
                videoAlone = {}
                videoAlone['bvid'] = bvid = media['bvid']
                if existBvidDict.get(bvid):
                    continue  # already recorded in the spreadsheet
                # NOTE(review): column 1 ('bvid') gets the full URL here but
                # the bare bvid in getMyBiliWaitList — confirm which is intended.
                videoInfoList[1] = 'https://www.bilibili.com/video/' + bvid
                videoAlone['title'] = media['title']
                videoInfoList[0] = deleteIllegalCharacter(videoAlone['title'])
                videoInfoList[2] = videoAlone['cover'] = media['cover']
                videoAlone['intro'] = media['intro']
                videoInfoList[3] = deleteIllegalCharacter(videoAlone['intro'])
                videoAlone['pubtime'] = media['pubtime']

                videoInfoList[9] = upperMid = str(media['upper']['mid'])
                videoInfoList[8] = upperName = media['upper']['name']
                videoInfoList[10] = upperFace = media['upper']['face']

                videoList.append(videoInfoList)

                # Group the video under its uploader, creating the bucket on
                # first sight.
                currentUpperDict = upperDict.get(upperMid)
                if currentUpperDict:
                    currentUpperDict['videos'].append(videoAlone)
                else :
                    videosByUpperDict = {}
                    videosByUpperDict['name'] = upperName
                    videosByUpperDict['face'] = upperFace
                    videosByUpperDict['mid'] = upperMid
                    videosByUpperDict['videos'] = [videoAlone]
                    upperDict[upperMid] = videosByUpperDict
    #upperDictJsonStr = json.dumps(upperDict,ensure_ascii=False)
    #with open('myfavoritelist.json','w',encoding='utf-8') as jsonFile:
    #    jsonFile.write(upperDictJsonStr)
    print('video sum:%d\nupper sum:%d' % (video_sum,len(upperDict)))
    #outputUpperDict(upperDict,r'C:\E\work_myself\python_study\mygithub\garden\content\english\myfavoritelist.md')
    #------xlsx begin------#
    genXlsx('myfavoritelist.xlsx',videoList,_table_header=xlsx_table_header)
    #------xlsx end  ------#
#----------fetch my bilibili favorite end--------------------------#
#----------fetch my bilibili waitlist begin-------------------#
def getMyBiliWaitList(existBvidDict):
    """Export my bilibili 'watch later' list (read from the local dump
    bilibiliwaitlist.json) to a dated xlsx file plus two markdown files.

    :param existBvidDict: {bvid: count} of already-recorded videos; truthy
        entries are skipped
    :return: list of newly added 12-column rows (xlsx_table_header order)
    """
    _html_url = ''  # unused; the list is read from the local JSON dump below
    upperDict = {}  # {mid: {'name','face','mid','videos':[...]}}
    videoList = []  # flat rows for genXlsx
    _url_content = getContentFromFile('bilibiliwaitlist.json')
    favoriteJson = json.loads(_url_content)
    video_sum = 0
    # NOTE(review): assumes the dump carries data.list entries with the usual
    # view-API media fields (bvid/title/pic/desc/...) — confirm against the file.
    for media in favoriteJson['data']['list']:
        video_sum += 1
        videoInfoList = ['-' for x in range(0,12)]  # one '-' per xlsx column
        videoAlone = {}
        #-----reduce video begin-----#
        bvid = media['bvid']
        if existBvidDict.get(bvid):
            continue  # already persisted in the spreadsheet
        videoInfoList[1] = videoAlone['bvid'] = media['bvid']
        videoAlone['title'] = media['title']
        videoInfoList[0] = deleteIllegalCharacter(videoAlone['title'])
        videoInfoList[2] = videoAlone['cover'] = media['pic']
        videoAlone['intro'] = media['desc']
        videoInfoList[3] = deleteIllegalCharacter(videoAlone['intro'])
        videoAlone['dynamic'] = media['dynamic']
        videoInfoList[4] = deleteIllegalCharacter(videoAlone['dynamic'])
        videoAlone['firstframe'] = ''
        # 'first_frame' is not present on every media entry.
        if 'first_frame' in media:
            videoAlone['firstframe'] = media['first_frame']
        videoInfoList[11] = videoAlone['firstframe']
        videoInfoList[5] = videoAlone['videolink'] = media['short_link_v2']
        videoInfoList[6] = videoAlone['typeid'] = str(media['tid'])
        videoInfoList[7] = videoAlone['typename'] = media['tname']

        upperMid = str(media['owner']['mid'])
        videoInfoList[8] = videoAlone['ownername'] = ownerName = media['owner']['name']
        videoInfoList[9] = videoAlone['ownermid'] = upperMid
        videoInfoList[10] = videoAlone['ownerface'] = ownerFace = media['owner']['face']
        videoList.append(videoInfoList)

        # Group the video under its uploader, creating the bucket on first sight.
        currentUpperDict = upperDict.get(upperMid)
        if currentUpperDict:
            currentUpperDict['videos'].append(videoAlone)
        else :
            videosByUpperDict = {}
            videosByUpperDict['name'] = ownerName
            videosByUpperDict['face'] = ownerFace
            videosByUpperDict['mid'] = upperMid
            videosByUpperDict['videos'] = [videoAlone]
            upperDict[upperMid] = videosByUpperDict
    #------text process begin------#
    #upperDictJsonStr = json.dumps(upperDict,ensure_ascii=False)
    #with open('mywaitlist.json','w',encoding='utf-8') as jsonFile:
    #    jsonFile.write(upperDictJsonStr)
    #print('video sum:%d\nupper sum:%d' % (video_sum,len(upperDict)))
    date_mark = time.strftime('%Y%m%d', time.localtime())
    # NOTE(review): output path is hard-coded to a local Windows directory.
    outputUpperDict(upperDict,r'C:\E\work_myself\python_study\mygithub\garden\content\english\mywaitlist' + date_mark + '.md')
    #------text process end  ------#

    #------xlsx begin------#
    genXlsx('mywaitlist%s.xlsx' % date_mark,videoList,_table_header=xlsx_table_header,_xlsx_sheet='mywaitlist.xlsx')
    #------xlsx end  ------#
    return videoList

#----------fetch my bilibili waitlist end  -------------------#
def outputUpperDict(_dict,_output_file):
    """Render an upperDict to two markdown files: *_output_file* (free-form
    list with cover images) and a companion '...Table.md' markdown table.

    :param _dict: {mid: {'name': ..., 'videos': [{'title','bvid','cover'},...]}}
    :param _output_file: target .md path; its basename (sans extension)
        becomes the front-matter title
    """
    mdstr = '''---
title: "%s"
date: %s
draft: false
---
'''
    mdTablePatternStr = '''|No|Title|Cover|Upper|
|-----:|:-----|:-----:|:-----|
'''
    no_sum = 0
    #------get title from '_output_file' begin------#
    sf = _output_file.replace('\\','/').split('/')
    title_str = sf[-1].split('.')
    st = time.strftime("%Y-%m-%dT%H:%M:%S%z", time.localtime())
    # Insert the ISO-8601 colon into the UTC offset (+0800 -> +08:00).
    # BUG FIX: the original used st.replace(st[-2:], ':00'), which replaced
    # EVERY occurrence of the offset's last two digits anywhere in the
    # timestamp (mangling dates/times containing e.g. '00').
    date_str = st[:-2] + ':' + st[-2:]
    mdstr = mdstr % (title_str[0],date_str)
    #------get title from '_output_file' end--------#
    for k,v in _dict.items():
        for _video in v['videos']:
            no_sum += 1
            # Free-form entry: uploader, linked title, then the cover image.
            mdstr = mdstr + '%s\t[%s](%s)\n<img src="%s" style="width:200px"/>\n\n' % (v['name'],_video['title'],r'https://www.bilibili.com/'+_video['bvid'],_video['cover'])
            # Table row: strip '|' from the fields so the markdown table survives.
            mdTablePatternStr = mdTablePatternStr + '|%d|[%s](%s)|![pic](%s)|%s|\n' % (no_sum,_video['title'].replace('|',''),r'https://www.bilibili.com/'+_video['bvid'],_video['cover'].replace('|',''),v['name'].replace('|',''))

    with open(_output_file,'w',encoding='utf-8') as mdFile:
       mdFile.write(mdstr)
    with open(_output_file.replace('.md','Table.md'),'w',encoding='utf-8') as mdTableFile:
       mdTableFile.write(mdTablePatternStr)

#------xlsx begin------#
def getFileNameFromAllPathName(_path_name):
    """Return the final path component of *_path_name* (handles / and \\)."""
    normalized = _path_name.replace('\\', '/')
    return normalized.rsplit('/', 1)[-1]

# Characters the xlsx/markdown output must not contain: Excel-reserved
# punctuation plus ASCII control codes (tab, LF and CR are kept).
# NOTE(review): the original non-raw literal's '\\:' collapsed to the regex
# escape '\:', so a literal backslash was never removed; that observed
# behavior is preserved here.
_ILLEGAL_CHARACTER_RE = re.compile(r'[:/?*\[\]!&]|[\000-\010]|[\013-\014]|[\016-\037]')

def deleteIllegalCharacter(_str):
    """Strip characters that break xlsx cells / markdown from *_str*.

    Removes : / ? * [ ] ! & and control characters except tab, LF and CR.

    :param _str: text to sanitize
    :return: the sanitized text
    """
    # Raw string + module-level pattern: the original rebuilt the pattern from
    # a non-raw literal on every call ('\/', '\?' are invalid str escapes and
    # emit DeprecationWarning on modern Python).
    return _ILLEGAL_CHARACTER_RE.sub('',_str)
# Column order for rows produced by the bilibili fetchers and consumed by genXlsx.
xlsx_table_header = ['title','bvid','cover','intro','dynamic','videolink','typeid','typename','ownername','ownermid','ownerface','firstframe']
def genXlsx(_xlsx_file,_videoList,_xlsx_sheet='',_table_header=None,_column_range=''):
    """Append *_videoList* rows to an xlsx workbook, creating it if absent.

    :param _xlsx_file: workbook path; when it already exists rows are appended
        to sheet *_xlsx_sheet* (which must then name an existing sheet)
    :param _videoList: iterable of row sequences to append
    :param _xlsx_sheet: sheet name, used only when the file already exists
    :param _table_header: header row written once when the file is created
    :param _column_range: unused placeholder parameter
    """
    # BUG FIX: _table_header defaulted to a shared mutable list ([]).
    if _table_header is None:
        _table_header = []
    if os.path.exists(_xlsx_file):
        # Existing file: open it and append to the named sheet.
        wb = load_workbook(_xlsx_file)
        ws = wb[_xlsx_sheet]
    else:
        # New workbook: write the header row and style it.
        wb = opwb()
        ws = wb.active
        ws.title = getFileNameFromAllPathName(_xlsx_file)
        ws.append(_table_header)
        bg_color = PatternFill("solid", fgColor="0099CC00")
        # NOTE(review): styling the RowDimension looks redundant — the per-cell
        # loop below is what actually colors the header; confirm before removing.
        ws.row_dimensions[1].fill = bg_color
        ws.freeze_panes = 'A2'  # keep the header row visible while scrolling
        ws.row_dimensions[1].height = 30
        font = Font(size=20)
        # Hard-coded to 12 header columns (A..L), matching xlsx_table_header.
        for _row in ws['A1:L1']:
            for _cell in _row:
                _cell.fill = bg_color
                _cell.font = font
    for _d in _videoList:
        ws.append(_d)
    wb.save(_xlsx_file)
    wb.close()
#------xlsx end  ------#
#------from upperdict json to markdown begin------#
def genMarkdownFileFromUpperdictJson(upperDictJsonFile,markdownFile):
    """Load an upperDict JSON dump and render it to markdown via outputUpperDict."""
    with open(upperDictJsonFile,'r',encoding='utf-8') as source:
        upper_dict = json.load(source)
    outputUpperDict(upper_dict,markdownFile)
#------from upperdict json to markdown end--------#
#------遍历保存的视频bvid 以去除重复视频 begin-----#
def getExistVideo(xlsx_name,sheet_name,sheet_table_head_name,sheet_table_column_index):
    """Collect the video ids already stored in an xlsx sheet.

    :param xlsx_name: workbook to scan
    :param sheet_name: sheet holding the saved videos
    :param sheet_table_head_name: header-cell text to skip (e.g. 'bvid')
    :param sheet_table_column_index: 0-based column index of the id cells
    :return: dict mapping id -> occurrence count (duplicates counted and printed)
    """
    existBvidOfVideoDict = {} #{bvid: count}
    wbl = load_workbook(xlsx_name)
    ws = wbl[sheet_name]
    for _r in ws.rows:
        cell_value = _r[sheet_table_column_index].value
        if cell_value is None:
            continue  # empty cell: skip instead of crashing on .strip()
        v = cell_value.strip()
        if sheet_table_head_name == v:
            continue  # header row
        if v in existBvidOfVideoDict:
            # BUG FIX: the original did curr_dict[v] += 1 where curr_dict was
            # the int count, raising TypeError on the first duplicate.
            existBvidOfVideoDict[v] += 1
            print('exist:',v)
        else :
            existBvidOfVideoDict[v] = 1
    wbl.close()
    print(existBvidOfVideoDict)
    return existBvidOfVideoDict

#------遍历保存的视频bvid 以去除重复视频 end  -----#

def processBilibiliWaitList(new_json,excel_file):
    """Planned end-to-end pipeline for the 'watch later' list (not implemented).

    :param new_json: JSON string of the current watch-later list
    :param excel_file: path of the persisted spreadsheet to reconcile against
    """
    # Steps (translated from the original notes):
    # 1. Get the watch-later list's JSON string
    # 2. Read the saved favorite-video id dict from the Excel file (local persisted data)
    # 3. Assemble the videos newly added to the waitlist
    # 4. Update the Excel file
    # 5. Generate new md files (one table version, one plain)
    pass

#------get Ixigua and Toutiao Favorite begin------#
def getIxiguaFavoriteList(_storedVideoIdDict,_html_text_file):
    """Parse a locally saved ixigua favorites page and append the videos not
    yet persisted to ixiguafavorite.xlsx.

    :param _storedVideoIdDict: dict of already-persisted video ids; truthy
        entries are skipped
    :param _html_text_file: path of the saved favorites-page HTML
    """
    _html_content = getContentFromFile(_html_text_file)
    bsp = BeautifulSoup(_html_content,'html.parser')
    # One wrapper node per feed card on the favorites page.
    videoPackList = bsp.select('.FeedContainer__itemWrapper')
    _videoList = []
    _hadRecordVideoIdDict = {}  # ids seen in THIS page, to drop in-page repeats
    for vh in videoPackList:
        # Columns: title, id, link, cover, uppername, upperhome.
        _videoInfo = ['-' for x in range(0,6)]
        vi = vh.select('.HorizontalFeedCard__rich__media > .HorizontalFeedCard__title')[0]

        # The href doubles as the video id once tracking separators are stripped.
        _videoInfo[1] = vId = vi['href'].replace('&&','').replace('?&','')
        if _storedVideoIdDict.get(vId):
            _hadRecordVideoIdDict[vId] = 1
            continue
        if _hadRecordVideoIdDict.get(vId):
            _hadRecordVideoIdDict[vId] += 1
            # NOTE(review): 'title' is not assigned yet on this path — it
            # still holds the previous card's title (NameError if the very
            # first card repeats); confirm the intended key.
            _hadRecordVideoIdDict[str(vId)+title] = _hadRecordVideoIdDict[vId]
            continue
        else:
            _hadRecordVideoIdDict[vId] = 1
        title = vi['title']
        _videoInfo[0] = deleteIllegalCharacter(title)
        _videoInfo[2] = videoLink = 'https://www.ixigua.com%s' % vId
        upperInfo = vh.select('.HorizontalFeedCard__rich__media > .HorizontalFeedCard__author-name > .user__name')
        if len(upperInfo) > 0 :
            upperName = upperInfo[0]['title']
            _videoInfo[4] = deleteIllegalCharacter(upperName)
            _videoInfo[5] = upperHome = 'http://www.ixigua.com%s' % upperInfo[0]['href'].split('?')[0]
        #cover = base64.b64decode(vh.select('.tt-img-loaded')[0]['src']).decode('utf-8')
        # Covers are HTML-escaped in the saved dump; restore '&' separators.
        _videoInfo[3] = cover = vh.select('.tt-img-loaded')[0]['src'].replace('&amp;','&')
        _videoList.append(_videoInfo)
    print('new video sum:%d' % len(_videoList))
    #print(_hadRecordVideoIdDict)
    genXlsx('ixiguafavorite.xlsx',_videoList,_xlsx_sheet='ixiguafavorite.xlsx',_table_header=['title','vId','videolink','cover','uppername','uppperhome'],_column_range='')

def getToutiaoFavoriteList():
    """Parse the saved toutiao favorites page and write every feed entry
    (videos, articles, micro-posts) to mytoutiaofavoritelist.xlsx."""
    bsp = BeautifulSoup(getContentFromFile('toutiaofavorite.html'),'html.parser')
    videos = bsp.select('.profile-normal-video-card-wrapper')
    articles = bsp.select('.profile-article-card-wrapper')
    weitoutiaos = bsp.select('.profile-wtt-card-wrapper')
    print('video sum:%d\narticle sum:%d\nweitoutiao sum:%d' % (len(videos),len(articles),len(weitoutiaos)))
    # Reduce every card to a (title, href, cover, type) tuple, videos first.
    elementList = [packToutiaoFavoriteFeedTuple(_v,'video') for _v in videos]
    elementList += [packToutiaoFavoriteFeedTuple(_a,'article') for _a in articles]
    elementList += [packToutiaoFavoriteFeedTuple(_w,'weitoutiao') for _w in weitoutiaos]
    genXlsx('mytoutiaofavoritelist.xlsx',elementList,_table_header=['title','href','cover','type'])

# Prefix test: does the URL already carry an http(s) scheme?
pattern_html_s = re.compile('https|http')

def packToutiaoFavoriteFeedTuple(_target,_type):
    """Reduce one toutiao feed card to a (title, href, cover, type) tuple.

    :param _target: BeautifulSoup Tag for one favorite card
    :param _type: label copied into the tuple ('video'/'article'/'weitoutiao')
    :return: (title, href, cover, type); unrecognized cards yield
        (prettified html, '-', '-', 'None')
    """
    # Card with a cover image.
    _e = _target.find_all('div',class_='feed-card-cover')
    if len(_e)>0 and len(_e[0].select('a')) > 0:
        _e = _e[0].select('a')[0]
        title = _e['title'] if _e.get('title') else '-'
        href = _e['href']
        # BUG FIX: unescape '&amp;' to '&' — the original stripped it to '',
        # fusing adjacent query parameters; now consistent with the ixigua
        # cover handling.
        cover = _e.select('img')[0]['src'].replace('&amp;','&')
        # Protocol-relative '//...' sources need an explicit scheme.
        cover = cover if pattern_html_s.match(cover) else 'https:'+cover
        return (title,href,cover,_type)
    # Text-only article card.
    _e = _target.find_all('div',class_='feed-card-article-l')
    if len(_e)>0 and len(_e[0].select('a')) >0:
        _e = _e[0].select('a')[0]
        title = _e['aria-label']
        href = _e['href']
        return (title,href,'-',_type)
    return (_target.prettify(),'-','-','None')
#------get Ixigua and Toutiao Favorite end--------#

if __name__ == '__main__':
    # Ad-hoc driver: the commented calls are alternative one-off tasks;
    # enable the one needed. exit() calls fence off the experiments, so
    # everything after the first live exit() never runs.
    #getIxiguaFavoriteList({},'ixiguamyfavorite.txt')
    #getToutiaoFavoriteList()
    #exit()
    #getMyBiliFavoriteList({})
    getMyBiliWaitList({})
    exit()
    #genMarkdownFileFromUpperdictJson('mybilibilifavoritelist.json','mybilibilifavoritelist.md')
    #exit()
    hadvideodict = getExistVideo('myfavoritelist.xlsx','favoritelist','bvid',1)
    exit()

    # Unreachable scratchpad below: rebuild myfavoritelist.xlsx from the
    # saved upperDict JSON dump.
    videoList = []
    uppersum = 0
    videosum = 0
    upperdict = {}
    with open('mybilibilifavoritelist.json','r',encoding='utf-8') as f:
        upperdict = json.loads(f.read())
    for _upperid,_video in upperdict.items():
        uppersum += 1
        for v in _video['videos']:
            videoinfo = ['-' for x in range(0,12)]  # xlsx_table_header order
            videosum += 1
            videoinfo[0] = deleteIllegalCharacter(v['title'])
            videoinfo[1] = 'https://www.bilibili.com/video/'+v['bvid'] 
            videoinfo[2] = v['cover']
            videoinfo[3] = deleteIllegalCharacter(v['intro'])
            videoinfo[8] = _video['name']
            videoinfo[9] = str(_video['mid'])
            videoinfo[10] = _video['face']
            videoList.append(videoinfo)
            #break
        #break
    print('upper sum:%d;video sum:%d' % (uppersum,videosum))
    genXlsx('myfavoritelist.xlsx',videoList,_table_header=xlsx_table_header)
