from asyncio.windows_events import NULL
import requests,re,json,html2text,sys,time,urllib.parse
from bs4 import BeautifulSoup
from array import array
import time 
from urllib.request import urlretrieve
import os
import shelve
import aiohttp
import asyncio
from datetime import datetime
from functools import wraps
import random
import os.path
import logging
from logging.handlers import TimedRotatingFileHandler
from logging.handlers import RotatingFileHandler
from urllib.parse import quote,unquote
import html
import json

class WeiboVideoFetcher():
    """Scrapes video sources and picture ids from a followed Weibo blogger's feed.

    Authentication is reconstructed from a raw browser header dump
    (one ``name: value`` header per line, including a ``cookie`` line),
    the kind produced by "copy request headers" in browser dev tools.
    """

    def __init__(self, headerFile):
        # Path to the raw header dump; parsed into self.header / self.cookies.
        self.headerFile = headerFile
        self.get_header_from_file()
        self.get_cookies_from_file()
        # Counter used to name the debug HTML dumps written by get_video_list().
        self.fileIdx = 0

    @staticmethod
    def _decode_response(response):
        """Re-decode a response body as UTF-8, dropping undecodable bytes.

        Weibo pages declare one charset but embed UTF-8 payloads; round-trip
        through the declared encoding with errors ignored, as the original
        inline code did.  Falls back to UTF-8 when no encoding is reported
        (response.encoding can be None, which used to crash encode()).
        """
        encoding = response.encoding or 'utf-8'
        return response.text.encode(encoding, errors='ignore').decode('utf-8', errors='ignore')

    def get_video_list(self, htmlData):
        """Extract videos and picture ids from one feed HTML fragment.

        Returns a tuple ``(videoMap, imgList)``:
        videoMap maps video id -> {source_url: resolution_label},
        imgList is a list of pic_id strings (may contain duplicates).
        Side effect: dumps the raw fragment to outhtmlData<N>.txt for debugging.
        """
        soup = BeautifulSoup(htmlData, "html.parser")
        # Only <li> elements that actually carry a class attribute are feed items.
        liList = [x for x in soup.find_all('li') if 'class' in x.attrs and len(x.attrs["class"]) > 0]
        self.fileIdx = self.fileIdx + 1
        with open('outhtmlData%d.txt' % (self.fileIdx), 'w', encoding = 'utf-8') as f:
            f.write(htmlData)
        strList = {}
        imgList = []
        for i in liList:
            if 'video-sources' in i.attrs:
                # The attribute value is double URL-encoded; decode twice.
                tmpStr = unquote(unquote(i.attrs['video-sources'], 'utf-8'), 'utf-8')
                # Resolution markers in descending preference; take the first hit.
                keyMap = {'1080' : '&1080=http', '720' : '&720=http', '480' : '&480=http', '360' : 'fluency=http'}
                endStr = ',video'
                for k, v in keyMap.items():
                    if tmpStr.find(v) != -1:
                        # Slice from the 'http' prefix up to and including ',video'.
                        source = tmpStr[tmpStr.find(v) + len(v) - len('http') : tmpStr.find(endStr, tmpStr.find(v)) + len(endStr)]
                        # Drop the query string and the 7-char 'http://' prefix.
                        videoId = source.split('?')[0][7:]
                        strList[videoId] = {source : k}
                        break
            # BUGFIX: the original tested `is not NULL` where NULL came from
            # asyncio.windows_events (it is just 0, and Windows-only); the
            # intended check is `is not None`.
            if 'action-type' in i.attrs and i.attrs['action-type'] == 'fl_pics' and i.attrs['action-data'] is not None:
                # BUGFIX: split each pair on the first '=' only and skip
                # flag-only fragments, instead of crashing on them.
                tmpMap = dict(x.split('=', 1) for x in i.attrs['action-data'].split('&') if '=' in x)
                if 'pic_id' in tmpMap:
                    imgList.append(tmpMap['pic_id'])

        # Debug output, kept from the original implementation.
        print(len(strList))
        print(strList)
        print(len(imgList))
        print(imgList)
        return strList, imgList

    def get_all_video_list(self):
        """Fetch the blogger's profile page plus two lazy-load chunks.

        Requires self.baseUrl (set by get_blogger_by_name).  Also populates
        self.sysConfig from the page's $CONFIG assignments, which the
        lazy-load requests need.  Returns (videoMap, imgList).
        """
        response = requests.get(self.baseUrl, headers = self.header, cookies = self.cookies)
        text = self._decode_response(response)
        lines = text.split('\n')
        # Harvest the  $CONFIG['key'] = 'value'  assignments embedded in the page.
        self.sysConfig = {x[x.find("'") + 1 : x.find("'", x.find("'") + 1)] : x[x.rfind("'", 0, x.rfind("'") - 1) + 1 : x.rfind("'")] for x in lines if x.find('$CONFIG') != -1}
        # The feed HTML lives in the longest line, shaped like FM.view({... "html": ...}).
        # (max with key=len replaces the original O(n^2) scan; ties resolve to
        # the first longest line, same as before.)
        maxLine = max(lines, key = len)
        jsonContext = maxLine[maxLine.find('(') + 1 : maxLine.rfind(')')]
        htmlData = json.loads(jsonContext)['html']
        videoMap, imgList = self.get_video_list(htmlData)

        # The profile feed lazy-loads two more chunks on scroll (pagebar 0 and 1).
        for rollPeriod in (0, 1):
            tmpVideoMap, tmpImgList = self.get_roll_html_video_list(rollPeriod)
            videoMap.update(tmpVideoMap)
            imgList.extend(tmpImgList)
        return videoMap, imgList

    def get_header_from_file(self):
        """Parse self.headerFile into self.header (plain HTTP header dict).

        Skips malformed lines, HTTP/2 pseudo-headers (':authority' etc.),
        and the 'cookie' line (handled by get_cookies_from_file).
        """
        header = {}
        with open(self.headerFile, 'r') as f:
            for line in f:
                # Blank / malformed lines used to raise IndexError; skip them.
                if ':' not in line:
                    continue
                # BUGFIX: split on the FIRST ':' only, so values that contain
                # ':' themselves (e.g. 'referer: https://...') stay intact.
                name, value = line.split(':', 1)
                header[name.strip()] = value.strip()
        for k in list(header.keys()):
            if k.startswith(':') or k == 'cookie' or len(k) == 0:
                del header[k]
        self.header = header

    def get_cookies_from_file(self):
        """Parse the 'cookie:' line of self.headerFile into self.cookies.

        NOTE(review): if the dump has no cookie line, self.cookies is never
        set (same as the original) and later requests will fail — confirm the
        header dump always includes one.
        """
        with open(self.headerFile, 'r') as f:
            for line in f:
                if line.startswith('cookie'):
                    cookieStr = line[line.find(':') + 1 : ].strip()
                    self.cookies = {}
                    for pair in cookieStr.split(';'):
                        # BUGFIX: split each pair on the FIRST '=' only, so
                        # values containing '=' (base64 padding etc.) survive;
                        # fragments without '=' are skipped instead of crashing.
                        name, sep, value = pair.strip().partition('=')
                        if sep:
                            self.cookies[name] = value
                    break

    def get_roll_html_video_list(self, rollPeriod):
        """Fetch one lazy-load chunk of the profile feed and parse it.

        rollPeriod: 0-based 'pagebar' index of the scroll chunk.
        Requires self.sysConfig, i.e. get_all_video_list() must have run.
        """
        rollHeader = {
            'ajwvr': 6,
            'domain': self.sysConfig['domain'],
            'from': 'myfollow_all',
            'is_all': 1,
            'pagebar': rollPeriod,
            'pl_name': 'Pl_Official_MyProfileFeed__26',
            'id': self.sysConfig['page_id'],
            'script_uri': '/u/' + self.sysConfig['oid'],
            'feed_type': 0,
            'page': 1,
            'pre_page': 1,
            'domain_op': self.sysConfig['domain'],
            '__rnd': int(round(time.time() * 1000))  # cache-busting timestamp
        }

        rollUrl = 'https://weibo.com/p/aj/v6/mblog/mbloglist?' + "&".join([k + "=" + str(v) for k, v in rollHeader.items()])
        response = requests.get(rollUrl, headers = self.header, cookies = self.cookies)
        htmlData = json.loads(self._decode_response(response))['data']
        return self.get_video_list(htmlData)

    def get_video_download_url(self, url):
        """Fetch the playback-info JSON for a video URL and dump it to out.txt.

        BUGFIX: the original ignored *url* entirely and always queried a
        hard-coded video id (4687901330702452), in both the page path and the
        POST body.  The id is now taken from the URL's first '=' parameter,
        matching the author's commented-out intent.
        """
        vid = url.split('=')[1] if '=' in url else url
        detailUrl = 'https://weibo.com/tv/api/component?page=' + quote('/tv/show/1034:%s' % vid, 'utf-8')
        # The endpoint expects this cookie to carry a fresh client timestamp.
        tmp1 = {"time":int(round(time.time() * 1000)),"dm_pub_total":20,"chat_group_client":0,"chat_group_notice":3,"allcountNum":652,"msgbox":0}
        self.cookies['webim_unReadCount'] = quote(json.dumps(tmp1), 'utf-8')
        data = 'data={"Component_Play_Playinfo":{"oid":"1034:%s"}}' % vid
        result = requests.post(detailUrl, headers = self.header, cookies = self.cookies, data = data)
        text = self._decode_response(result)
        with open('out.txt', 'w', encoding = 'utf-8') as f:
            f.write(text)

    def get_blogger_by_name(self, name, page):
        """Search followed accounts for *name* and point self.baseUrl at the
        first match's profile-feed URL for the given page number.

        Raises IndexError (unchanged) when the search returns no match.
        """
        url = 'https://weibo.com/aj/relation/attention?ajwvr=6&q=' + name
        response = requests.get(url, headers = self.header, cookies = self.cookies)
        text = self._decode_response(response)
        result = json.loads(text)
        urlList = ['https://weibo.com/u/' + x["uid"] + '?is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&page=' + str(page) + '#feedtop' for x in result["data"]]
        print(urlList)
        self.baseUrl = urlList[0]

if __name__ == "__main__":
    # Display name of the followed blogger to scrape (previously-used names
    # lived here as commented alternatives; see VCS history).
    postName = '传奇女司机'
    pageIdx = 1
    dbName = 'weibo_recorder'

    obj = WeiboVideoFetcher('header.txt')
    obj.get_blogger_by_name(postName, pageIdx)
    baseUrl = obj.baseUrl

    result, imgListResult = obj.get_all_video_list()

    # Shelve DB remembers what was already seen, so reruns report new items only.
    dbase = shelve.open(dbName)
    videoList = dbase['video_list'] if 'video_list' in dbase else {}
    # Blogger numeric id, sliced out of https://weibo.com/u/<id>?... .
    weiboId = baseUrl[baseUrl.find('/u/') + len('/u/') : baseUrl.find('?')]
    if weiboId not in videoList:
        videoList[weiboId] = result
    else:
        known = videoList[weiboId]
        for vid in list(result.keys()):
            if vid in known:
                del result[vid]  # comment this out to skip the duplicate check
            else:
                known[vid] = result[vid]

    imgSeen = set(dbase['img_list']) if 'img_list' in dbase else set()
    imgListToPrint = [pic for pic in imgListResult if pic not in imgSeen]
    dbase['video_list'] = videoList
    dbase['img_list'] = imgSeen.union(set(imgListToPrint))
    dbase.close()

    print("start write txt")
    # One record per new video: id, then resolution + source URL pairs.
    with open('result.txt', 'w', encoding = 'utf-8') as f:
        for videoId, sources in result.items():
            f.write(videoId + '\n')
            for sourceUrl, resolution in sources.items():
                f.write(resolution + '\n')
                f.write(sourceUrl + '\n')

    # New picture ids, expanded to full-size image URLs.
    with open('result_img.txt', 'w', encoding = 'utf-8') as f:
        for pic in imgListToPrint:
            f.write('https://wx3.sinaimg.cn/large/' + pic + '.jpg\n')