from cron_converter import Cron
import time, datetime, functools
import os, feedparser
from urllib.request import ProxyHandler
from common import Config, DbHelper
from novel import NovelCrawler
from faq import Faq

class YoutubeCrawler(Config):
    """Crawl configured YouTube/Bilibili RSS feeds, download new entries as
    audio files with yt-dlp, move them into the save directory, and record
    each downloaded item in the database.
    """

    def __init__(self, save_path: str, proxy_url: str):
        # Downloaded audio files end up under <save_path>/rssfiles.
        self.save_path = save_path + '/rssfiles'
        # HTTP(S) proxy, used only for youtube.com feeds and downloads.
        self.proxy_url = proxy_url
        self.db = DbHelper()

    def crawl(self):
        """Process every channel in youtube.json5 whose cron window matches now."""
        channel_list = self.load_config('youtube.json5')
        # Example channel entry:
        # {'name': '金钱爆', 'rss_url': 'https://m.147xs.org/book/136540/', 'page_size': 2}
        for channel in channel_list:
            if self.__check_channel(channel.get('cron', None)):
                self.crawl_one(channel)

        self.db.close()

    def __check_channel(self, cron_str: str) -> bool:
        """Return True when the channel should be crawled now.

        getDiff_prevCron_now presumably comes from Config (not visible in this
        file); it appears to return the seconds elapsed since the previous cron
        fire time, or a falsy value when no cron is configured — TODO confirm.
        """
        diff = self.getDiff_prevCron_now(cron_str)
        # Crawl when no cron is set, or within 3 hours after the scheduled slot.
        return not diff or 0 < diff < 3 * 60 * 60

    def md5_encrypt(self, input_str: str) -> str:
        """Return the hex MD5 digest of input_str (used as a stable file id)."""
        import hashlib
        return hashlib.md5(input_str.encode('utf-8')).hexdigest()

    def crawl_one(self, channel: dict):
        """Fetch one channel's RSS feed and download its newest page_size entries."""
        channel_url = channel['rss_url']
        page_size = channel['page_size']
        channel_name = channel['name']
        print(f'Youtube: Crawl {channel_name}')
        if "youtube.com" in channel_url:
            proxy_handler = ProxyHandler({'http': self.proxy_url, 'https': self.proxy_url})
            rss = feedparser.parse(channel_url, handlers=[proxy_handler])
            if rss['bozo']:     # feed fetch/parse failed (e.g. proxy unreachable)
                print(f"ERROR: 代理服务器无法连接，跳过：{channel_url}")
                return
        else:
            rss = feedparser.parse(channel_url)

        for entry in rss['entries'][:page_size]:
            video_url = entry['link']
            if self.db.is_existed_rss_item(video_url):
                continue

            # Skip videos that have not premiered yet (a scheduled premiere
            # shows up in the feed with a view count of 0).
            try:
                if int(entry['media_statistics']['views']) == 0:
                    continue
            except KeyError:
                pass  # feeds without media_statistics are downloaded normally

            exclude = channel.get('exclude', None)
            if exclude and exclude in entry['title']:
                continue

            include = channel.get('include', None)
            if include and include not in entry['title']:
                continue

            print("DEBUG: 下载 "+entry['title'] + ", url=" + video_url)
            # NOTE(review): video_url and video_id are interpolated into shell
            # commands below; a crafted feed could inject shell syntax. Consider
            # subprocess.run([...], shell=False) instead of os.system.
            video_id = None
            if "youtube.com" in video_url:
                # YouTube watch URLs end in "...?v=<id>"; take the id after '='.
                video_id = video_url[video_url.rindex('=')+1:]
                ret = os.system(f"yt-dlp -f worstaudio --proxy {self.proxy_url} --external-downloader aria2c -o {video_id}.m4a {video_url}")
            elif "bilibili.com" in video_url:
                # e.g. https://www.bilibili.com/video/BV1WP411c7j9/?spm_id_from=333.999.0.0
                # Bilibili URLs carry no single query id, so hash the URL for a file name.
                video_id = self.md5_encrypt(video_url)
                # yt-dlp stamps the file with the video's publish time, not the
                # download time (corrected by the `touch` below).
                try:
                    ret = os.system(f"yt-dlp -f 0 --external-downloader aria2c -o {video_id}.m4a {video_url}")
                except Exception as e:  # bug fix: was `except e:`, which raised NameError
                    ret = -1
                    print("ERROR: yt-dlp bilibili: "+str(e))
            else:
                continue

            if ret == 0:
                # `touch` on an existing file resets atime/ctime/mtime to now, so
                # the stored file's timestamp is the download time.
                ret = os.system(f"mv ./{video_id}.m4a {self.save_path} ; touch {self.save_path}/{video_id}.m4a")
                if ret == 0:
                    try:
                        ymd = datetime.datetime.now().strftime('%Y%m%d')
                        self.db.insert_rss_item(ymd+entry['title'], video_url, f'http://dev.cddyys.com:8180/mynotes/rssfiles/{video_id}.m4a')
                    except Exception as e:
                        print("ERROR: insert rss item: "+str(e))


def crawl_youtube():
    """Entry point: run a single crawl pass over all configured channels."""
    started = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('>>>>>>>>>> youtube:\t' + started)
    # Alternate proxy previously used: 'http://192.168.31.77:1083'
    crawler = YoutubeCrawler('/home/gjh/mynotes/', 'http://127.0.0.1:10809')
    crawler.crawl()

# Allow running this module directly as a one-shot crawl.
if __name__ == '__main__':
    crawl_youtube()