#from asyncio.windows_events import NULL
import requests,re,json,html2text,sys,time,urllib.parse
from bs4 import BeautifulSoup
from array import array
import time 
from urllib.request import urlretrieve
import os
import shelve
import aiohttp
import asyncio
from datetime import datetime
from functools import wraps
import random
import os.path
import logging
from logging.handlers import TimedRotatingFileHandler
from logging.handlers import RotatingFileHandler
from urllib.parse import quote,unquote
import html
import json

class WeiboVideoFetcher():
    """Fetch video and picture URLs from a Weibo user's timeline.

    Authentication is borrowed from a browser session: the constructor parses
    a "name: value" header dump (including the Cookie line) exported from the
    browser's dev tools, and every API request replays those headers/cookies.
    """

    def __init__(self, headerFile):
        # Path to the browser-exported header dump ("name: value" per line).
        self.headerFile = headerFile
        self.get_header_from_file()
        self.get_cookies_from_file()
        self.setVideoDefList()
        self.setPicDefList()
        # Reserved counter for downloaded files (not used in this module).
        self.fileIdx = 0

    def setVideoDefList(self):
        # Video definitions ordered from most to least preferred quality;
        # getVideoUrl() takes the first one present.
        self.videoDefList = [
            "hevc_mp4_720p",
            "mp4_720p_mp4",
            "inch_5_5_mp4_hd",
            "inch_5_mp4_hd",
            "inch_4_mp4_hd",
            "stream_url_hd",
            "stream_url",
        ]

    def setPicDefList(self):
        # Picture size variants ordered from most to least preferred;
        # getPicUrl() takes the first one present.
        self.picDefList = [
            "largest",
            "original",
            "large",
            "mw2000",
        ]

    def getVideoUrl(self, data, video_list):
        """Append the best available video URL found in *data* to video_list.

        data: a post's 'page_info' dict (or a mix-media item's 'data' dict)
              that may carry a 'media_info' mapping of definition -> URL.
        """
        if 'media_info' not in data:
            return
        media_info = data['media_info']
        for definition in self.videoDefList:
            # Stop at the first non-empty URL: the list is ordered by
            # preference, and collecting every definition would queue the
            # same video several times (mirrors the break in getPicUrl).
            if media_info.get(definition):
                video_list.append(media_info[definition])
                break

    def getPicUrl(self, data, currentPic, currentVideo):
        """Split a post's 'pic_infos' mapping into picture and video URLs.

        Live-photo entries carry a 'video' key and go to currentVideo;
        plain pictures contribute their best size variant to currentPic.
        """
        for info in data.values():
            if 'video' in info:
                currentVideo.append(info['video'])
                continue
            for size in self.picDefList:
                if size in info:
                    currentPic.append(info[size]["url"])
                    break

    def get_target_meida_list(self, targetIndex):
        """Return (videoList, picList) for timeline page *targetIndex*.

        Weibo paginates with a server-supplied since_id cursor, so pages
        1..targetIndex-1 must be fetched sequentially just to obtain the
        cursor; only the final page's media is collected.
        """
        since_id = ""
        videoList = []
        picList = []
        for page in range(targetIndex):
            try:
                if since_id == "":
                    url = "https://weibo.com/ajax/statuses/mymblog?uid=%s&page=1&feature=0" % (self.uid)
                else:
                    url = "https://weibo.com/ajax/statuses/mymblog?uid=%s&page=%d&feature=0&since_id=%s" % (self.uid, page + 1, since_id)
                # NOTE(review): verify=False disables TLS certificate checks;
                # kept for compatibility, but re-enable verification if possible.
                response = requests.get(url, headers=self.header, cookies=self.cookies, timeout=10, verify=False)
                print("get url end")
                data = json.loads(response.content.decode('utf-8'))
                since_id = data['data']["since_id"]
                if page == targetIndex - 1:
                    posts = data['data']["list"]
                    currentVideo = []
                    currentPic = []
                    for post in posts:
                        if 'page_info' in post:
                            self.getVideoUrl(post['page_info'], currentVideo)
                        if 'mix_media_info' in post:
                            for item in post['mix_media_info']['items']:
                                self.getVideoUrl(item['data'], currentVideo)
                        if 'pic_infos' in post:
                            self.getPicUrl(post['pic_infos'], currentPic, currentVideo)
                    print([post['created_at'] for post in posts if 'created_at' in post])
                    videoList.extend(currentVideo)
                    picList.extend(currentPic)
            except Exception as e:
                # Best-effort scraping: log the failure and try the next page.
                print("error")
                print(e)
        return videoList, picList

    def get_header_from_file(self):
        """Parse self.headerFile into self.header (lowercased header names).

        HTTP/2 pseudo-headers (':authority' etc.), the cookie line (handled
        by get_cookies_from_file) and nameless/malformed lines are dropped.
        """
        header = {}
        with open(self.headerFile, 'r') as f:
            for line in f:
                name, _, value = line.partition(":")
                header[name.strip().lower()] = value.strip()
        for k in list(header.keys()):
            if k.startswith(':') or k == 'cookie' or len(k) == 0:
                del header[k]
        self.header = header

    def get_cookies_from_file(self):
        """Extract the Cookie header from self.headerFile into self.cookies.

        Defaults to an empty dict when no cookie line exists, so later
        requests don't crash on a missing attribute.
        """
        self.cookies = {}
        with open(self.headerFile, 'r') as f:
            for line in f:
                if line.lower().startswith('cookie'):
                    for fragment in line[line.find(':') + 1:].strip().split(';'):
                        # Split on the FIRST '=' only: cookie values often
                        # contain '=' themselves (e.g. base64 payloads).
                        name, sep, value = fragment.strip().partition('=')
                        if sep:
                            self.cookies[name] = value
                    break

    def get_blogger_uid_by_name(self, name, uid):
        """Set the target account and base profile URL.

        *name* is currently unused: the former name->uid lookup endpoint
        (aj/relation/attention) is no longer used, so the explicit *uid*
        is taken as-is.
        """
        self.uid = uid
        self.baseUrl = "https://weibo.com/u/" + self.uid
        print(self.baseUrl)

def start_fetch(uid, start=1, end=5):
    """Fetch a Weibo user's media URLs and write the new ones to disk.

    uid:   numeric account id as a string.
    start: first timeline page to pull (inclusive).
    end:   last timeline page to pull (inclusive).

    Video URLs not previously seen go to result.txt, picture URLs to
    result_img.txt; seen URLs are tracked in the 'weibo_recorder' shelve.
    """
    dbName = 'weibo_recorder'
    # When True, previously recorded URLs are filtered through the shelve DB.
    is_check_dup = True
    postName = ''

    obj = WeiboVideoFetcher('header.txt')
    obj.get_blogger_uid_by_name(postName, uid)

    videoResult = []
    picResult = []
    for page in range(start, end + 1):
        print(f'start get page {page}')
        videoTmp, picTmp = obj.get_target_meida_list(page)
        videoResult.extend(x for x in videoTmp if x is not None)
        picResult.extend(x for x in picTmp if x is not None)

    def dedup_key(url):
        # Dedup key: URL without its scheme prefix or query string, so the
        # same resource with a rotating auth token maps to one key.  (The
        # previous url[7:url.find('?')] silently chopped the final character
        # whenever no '?' was present, because find() returned -1.)
        qpos = url.find('?')
        return url[7:] if qpos == -1 else url[7:qpos]

    dbase = shelve.open(dbName) if is_check_dup else {}
    try:
        videoList = {}
        if is_check_dup and 'video_list' in dbase:
            videoList = dbase['video_list']
        weiboId = obj.uid
        videoListToPrint = []
        if weiboId in videoList:
            for url in videoResult:
                key = dedup_key(url)
                if key not in videoList[weiboId]:
                    videoList[weiboId][key] = url
                    videoListToPrint.append(url)
        else:
            # First run for this account: everything is new.
            videoListToPrint = videoResult
            videoList[weiboId] = {dedup_key(url): url for url in videoResult}

        imgSeen = set()
        if is_check_dup and 'img_list' in dbase:
            imgSeen = set(dbase['img_list'])
        imgListToPrint = [url for url in picResult if url not in imgSeen]

        if is_check_dup:
            dbase['video_list'] = videoList
            dbase['img_list'] = imgSeen.union(imgListToPrint)
    finally:
        # Close the shelve even if the dedup bookkeeping raises.
        if is_check_dup:
            dbase.close()

    with open('result.txt', 'w', encoding='utf-8') as f:
        for url in set(videoListToPrint):
            f.write(url + '\n')

    with open('result_img.txt', 'w', encoding='utf-8') as f:
        for url in set(imgListToPrint):
            f.write(url + '\n')


if __name__ == "__main__":
    # Default target account; page range falls back to start_fetch defaults.
    DEFAULT_UID = "6583086815"
    start_fetch(DEFAULT_UID)