# -*- coding: UTF-8 -*-
# cython: language_level=3
import re
from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urljoin

import requests
from lxml import etree
from requests.utils import get_encodings_from_content

from base.base_parser import pass_urls
from db.mongodb import MongoDB
from db.proxy_db import get_proxy
from utils.download_url_transform import download_url_transform
from utils.own_tools import fake_ua, get_host, is_file_url, is_keyword_in_text, raise_error_site_name

# Maximum number of worker threads used when crawling child pages (see site_check).
MAX_thread = 8

# Fuzzy video-related keywords (not referenced elsewhere in this file).
regular_video_fuzzy = ['视频', '播放', '观看', '直播']
# Patterns searched for inside <script> bodies that hint at an embedded player.
regular_video_script = ['var player', 'm3u8', 'vodplayer', 'allowFullScreen', '视频', '播放器']
# File extensions treated as direct video resources.
regular_video_url = ['.m3u8', '.ts', '.mp4', '.mkv', '.avi', '.flv', '.wmv', '.asf', '.mov', '.webm', '.3gp']
# '.mp3', '.mpeg', '.wma', '.flac', '.aac', '.ape']
# id/class name fragments commonly used by HTML video-player containers.
regular_video_player = ['vodPlayer', 'vodplayer', 'doPlayer', 'bofqi', 'bofang', 'Bofang', 'BoFang', 'video', 'Video',
                        'dplayer', 'dplayer-mask', 'dplayer-video-wrap', 'dpplayer', 'video-player', 'movie-player']


class video_check:
    """Download a single page and score how likely it is to host video content.

    Heuristics: embeds/outlinks of well-known video sites, direct media-file
    URLs, downloader links (ed2k/magnet/thunder/...), and player markup in the
    HTML. Every match bumps the integer ``self.Confidence`` score.

    Raises on any non-HTML or oversized response, so construction failure
    means "no result for this page" (see ``is_video_``).
    """

    def __init__(self, url):
        self.Confidence = 0  # heuristic score, incremented by every match below
        self.url = url

        # scheme://host prefix, used as base when resolving relative links.
        self.index_url = re.search(r'(https?://[^/]+)', url).group(1)

        fake_headers = {'User-Agent': fake_ua.random, 'Content-Type': 'text/html;charset=UTF-8'}
        proxy = get_proxy()
        html = requests.get(self.url, headers=fake_headers, proxies=proxy, timeout=(2.1, 10.1), verify=False,
                            allow_redirects=True, stream=True)
        # Reject non-HTML responses and bodies over ~10 MB before reading them.
        if 'text/html' not in html.headers['Content-Type'] or (
                'Content-Length' in html.headers and int(html.headers['Content-Length']) > 10000000):
            print('not html', url)
            raise Exception('not html')

        # requests falls back to ISO-8859-1 when the server sends no charset;
        # in that case sniff the real encoding from the document content.
        if not html.encoding or html.encoding == 'ISO-8859-1':
            encodings = get_encodings_from_content(html.text)
            encodings = encodings[0] if encodings else html.apparent_encoding

            # Default to gbk for anything outside the common Chinese/UTF set.
            if (not encodings) or (encodings.lower() not in ['gbk', 'utf-8', 'utf8', 'gb2312']):
                html.encoding = 'gbk'
            else:
                html.encoding = encodings
        self.htmlstr = html.text
        self.html = etree.HTML(self.htmlstr)

        # Page <title>, empty string when the page has none.
        self.title = ''
        reg_title = '//title/text()'
        if self.html.xpath(reg_title):
            self.title = self.html.xpath(reg_title)[0]

    def get_download_url(self):
        """Extract downloader links (ed2k/magnet/thunder/flashget/qqdl/Baidu pan) from the raw HTML."""
        result = set()

        for reg in [r'ed2k:[0-9a-zA-Z|\-/%.=]+', r'magnet:\?xt=[0-9a-zA-Z/%.=:&;]+']:
            for download_url in re.findall(reg, self.htmlstr):
                result.add(download_url)

        for reg in [r'[Tt]hunder://[0-9a-zA-Z=&+/_]+', r'[Ff]lashget://[0-9a-zA-Z=&+/_]+', r'qqdl://[0-9a-zA-Z=&+/_]+']:
            for download_url in re.findall(reg, self.htmlstr):
                # These schemes wrap a real URL; keep the raw link if decoding fails.
                try:
                    real_url = download_url_transform(download_url)
                    result.add(real_url)
                except Exception:
                    result.add(download_url)

        for download_url_BDY in re.findall(r'https://pan.baidu.com/s/[0-9a-zA-Z_-]+', self.htmlstr):
            result.add(download_url_BDY)
        # TODO: also capture Baidu pan extraction codes (提取码).

        return result

    def get_hrefs(self):
        """Collect candidate link values (href/src/value attributes), skipping obvious static assets."""
        hrefs = set()
        href_list = self.html.xpath('//@href') + self.html.xpath('//@src') + self.html.xpath('//@value')
        for url_path in href_list:
            if len(url_path) < 8:
                continue
            if url_path.endswith(('.css', '.jpg', '.gif', '.png', '.js', '.ico')) or '.js?' in url_path:
                continue
            hrefs.add(url_path)
        return hrefs

    def format_url(self, urls):
        """Strip whitespace and resolve relative paths against the site root."""
        url_paths = set()
        for url_path in urls:
            url_path = url_path.strip()
            if not url_path.startswith('http'):
                url_path = urljoin(self.index_url, url_path)
            url_paths.add(url_path)
        return url_paths

    def check_video_url(self):
        """Scan page links for video-site embeds, play pages and direct media files.

        Returns the matched URLs as ``str(list(...))`` (stored verbatim in Mongo).
        """
        hrefs = self.get_hrefs()
        video_hrefs = set()

        # iframe/embed player sources of well-known video sites.
        waiLian = ['f.v.17173cdn.com', 'v.163.com', 'v.zol.com.cn',
                   'player.56.com', 'player.ku6.com', 'player.pptv.com/v/', 'player.bilibili.com/player.html?aid=',
                   'player.youku.com', 'tudou.com/v/', 'yuntv.letv.com', 'sogou.player.vodjk.com', 'tv.sohu.com',
                   'imgcache.qq.com/tencentvideo', 'v.qq.com/iframe/player', 'v.qq.com/txp/iframe/player.html?vid=',
                   'open.iqiyi.com/developer/player_js/coopPlayerIndex.html?vid=', 'player.video.iqiyi.com/']
        # Canonical play-page URL prefixes of major video sites.
        vsite_index_list = ['https://v.qq.com/x/cover/', 'https://www.iqiyi.com/v_',
                            'https://v.youku.com/v_show/id_', 'https://tv.sohu.com/v/',
                            'https://video.tudou.com/v/', 'https://www.ku6.com/video/detail?id=',
                            'http://www.56.com/', 'https://v.pptv.com/show/', 'https://v.ifeng.com/c/',
                            'https://www.mgtv.com/b/', 'https://www.bilibili.com/video/av',
                            'http://www.le.com/ptv/vplay/', 'http://zol.iqiyi.com.cn/video']

        for href in hrefs:
            matched = False
            # Embedded player from a known video site.
            for url_wai in waiLian:
                if url_wai in href:
                    matched = True
                    self.Confidence += 2000
                    video_hrefs.add(href)
                    break
            if matched:
                continue
            # Link to the play page of a known video site.
            for vsite_index in vsite_index_list:
                if href.startswith(vsite_index):
                    matched = True
                    self.Confidence += 2000
                    video_hrefs.add(href)
                    break
            if matched:
                continue

            if 'map.swf' in href or '/banner/default/flv.mp4' in href:  # skip common meaningless media
                continue

            # Direct media-file URLs (extension at end or followed by ?/&).
            lower = href.lower()
            for vreg in regular_video_url:
                if lower.endswith(vreg) or (vreg + '?') in lower or (vreg + '&') in lower:
                    self.Confidence += 2000
                    video_hrefs.add(href)
                elif lower.endswith('swf'):
                    # Flash files only count when the path looks player-related.
                    if '/v/' in lower or 'media' in lower or 'video' in lower or 'http://player.' in lower:
                        self.Confidence += 50
                        video_hrefs.add(href)

        video_hrefs = self.format_url(video_hrefs)
        for download_url in self.get_download_url():
            dl = download_url.lower()
            for vreg in regular_video_url:
                if dl.endswith(vreg) or (vreg + '?') in dl or (vreg + '&') in dl or (vreg + '|') in dl:
                    self.Confidence += 2000
            # Downloader links are kept even without a recognized media extension.
            video_hrefs.add(download_url)
        return str(list(video_hrefs))

    def check_video_player(self):
        """Look for player markup: <video>/<embed> tags, player scripts, and player-like ids/classes."""
        result_video_player = ''
        if self.html.xpath("//video"):
            result_video_player += '(video); '
            self.Confidence += 100
        if self.html.xpath("//embed[not(contains(@src,'image'))and not(contains(@src,'img'))]"):
            result_video_player += '(embed); '
            self.Confidence += 30

        scripts = self.html.xpath('//script/text()')
        for script in scripts:
            if '图片' in script or '广告' in script or 'var ad' in script:
                # Image-gallery / advertising scripts: ignore entirely.
                continue
            elif '2.ss.faisys.com' in script or '百度视频' in script or '必应视频' in script:
                self.Confidence += 10
                continue
            for reg_scr in regular_video_script:
                # Bug fix: the original rebound `scripts` here, shadowing the
                # list being iterated; use a fresh name for the match object.
                if re.search(reg_scr, script):
                    result_video_player += '(script中含有 "%s");' % reg_scr
                    self.Confidence += 20

        for i in regular_video_player:
            reg_id = "//*[(contains(@id,'%s')) and not(contains(@id,'Img')) and not(contains(@id,'img'))  and not(contains(@id,'pic')) and not(contains(@id,'ad'))]/@id" % i
            reg_class = "//*[contains(@class,'%s') and not(contains(@class,'Img')) and not(contains(@class,'img')) and not(contains(@class,'pic')) and not(contains(@class,'ad'))]/@class" % i
            ids = self.html.xpath(reg_id)
            classes = self.html.xpath(reg_class)
            if ids:
                result_video_player += '(id contains "%s")' % ids
                self.Confidence += 20 * len(ids)
            if classes:
                result_video_player += '(class contains "%s")' % classes
                self.Confidence += 20 * len(classes)

        # Flash containers are a weaker signal; video-ish names score extra.
        flash = list()
        for i in ['flash', 'Flash']:
            reg_f_id = "//*[(contains(@id,'%s')) and not(contains(@id,'Img')) and not(contains(@id,'img'))  and not(contains(@id,'pic')) and not(contains(@id,'ad'))]/@id" % i
            reg_f_class = "//*[contains(@class,'%s') and not(contains(@class,'Img')) and not(contains(@class,'img')) and not(contains(@class,'pic')) and not(contains(@class,'ad'))]/@class" % i
            f = self.html.xpath(reg_f_id)
            f += self.html.xpath(reg_f_class)
            if f:
                self.Confidence += 5
                for fla in f:
                    if 'video' in fla or 'Video' in fla:
                        self.Confidence += 10
                    flash.append(fla)
                result_video_player += '(flash contains "%s")' % flash
        return result_video_player

    def is_viedo(self):
        """Build the per-page result record (method name kept for backward compatibility).

        ``Confidence`` must be read only after both checks have run, since
        each one increments it as a side effect.
        """
        video_url = self.check_video_url()
        result_player = self.check_video_player()
        result = {'title': self.title, 'Confidence': self.Confidence, 'video_url': video_url,
                  'video_player': result_player, 'url': self.url}
        return result

    def __del__(self):
        print(self.url + '  ' + '检测完成')


def is_video_(url):
    """Fetch and analyze a single page.

    Returns a ``video_check`` instance on success, ``False`` on any failure
    (network error, non-HTML/oversized response, parse error) so callers can
    simply truth-test the result.
    """
    try:
        return video_check(url)
    except Exception:
        # Broad catch is deliberate: any per-page failure just skips the page.
        print('网络请求失败')
        return False


class site_check:
    """Crawl a site breadth-first up to ``depath`` levels and score video likelihood.

    Per-page results are stored in Mongo ('WEB_search_info'); hosts of external
    links discovered along the way are queued in 'todo_urls'.
    """

    def __init__(self, item, depath=3, fxck_oo=False):
        """
        @param item: site record with at least 'host' and 'home_page'
        @param depath: crawl depth; the home page counts as the first level
        @param fxck_oo: when True, cap each crawl level to 300 child pages
        """
        self.host = item['host']
        self.index_url = item['home_page']
        self.spider_depath = depath
        self.fxck_oo = fxck_oo
        # Fetch/parse the home page up front; False when the request failed.
        self.page_check = is_video_(self.index_url)

        self.site_Confidence = 0

        self.video_license = item.get('license', '')
        self.icp = item.get('icp', '')
        # A detected broadcasting license is a strong signal on its own.
        if self.video_license != '未检测到' and self.video_license:
            self.site_Confidence += 200
        self.db = MongoDB()

    def format_url(self, urls, page_url=None):
        """Normalize crawl candidates: drop file/blacklisted URLs, queue external hosts, resolve relative paths."""
        url_paths = set()
        for url_path in urls:
            url_path = url_path.strip()
            # Drop direct links to static/media/binary files.
            if is_file_url(url_path, ['jpg', 'jpeg', 'gif', 'png', 'ico', 'js', 'css', 'webp', 'xml', 'svg',
                                      'tiff', 'ttf', 'woff', 'woff2',
                                      'mp3', 'mp4', 'flv', 'm3u8', 'mov', 'wmv', 'avi', 'mpg', 'mkv', 'mpeg',
                                      'exe', 'pdf', 'doc', 'docx', 'ppt', 'pptx', 'xls', 'xlsx', 'apk'
                                      ]):
                continue
            if is_keyword_in_text(url_path, pass_urls):
                continue

            if url_path.startswith('http'):
                if self.host not in url_path.split('/')[2]:
                    # External link: queue its host for a later crawl, skip it here.
                    _host = get_host(url_path)
                    self.db.add('todo_urls', {'home_page': _host['home_page'], 'host': _host['host'], 'status': 0})
                    continue
            else:
                url_path = urljoin(page_url or self.index_url, url_path)
            url_paths.add(url_path)
        return url_paths

    def get_all_url(self, html, max=False, page_url=None):
        """Return a (capped) list of same-site links from an lxml document.

        NOTE: parameter name ``max`` shadows the builtin but is kept for
        backward compatibility with existing callers.
        """
        urls = html.xpath('//a/@href')
        if not max:
            del urls[1000:]  # cap raw anchors before the costly normalization
        url_paths = list(self.format_url(urls, page_url))
        if not max:
            del url_paths[300:]
        return url_paths

    def site_have_video(self):
        """Crawl the site; return {'site_Confidence', 'page_num'} or None when the home page failed."""
        if not self.page_check:
            return
        site_Confidences = list()
        seen = set()
        page_son_list = list()

        # Level 0: the home page itself.
        page_depath = self.page_check
        result = page_depath.is_viedo()
        result["host"] = self.host
        result["depath"] = 0
        self.db.add('WEB_search_info', result)
        if result["Confidence"]:
            site_Confidences.append(result["Confidence"])
        page_son_list.extend(self.get_all_url(page_depath.html))
        seen.add(self.index_url)

        # Levels 1..depath-1: fetch child pages concurrently.
        # `with` guarantees the pool's threads are reclaimed (the original
        # never shut the executor down).
        with ThreadPoolExecutor(max_workers=MAX_thread) as executor:
            for path in range(1, self.spider_depath):
                obj_list = []

                if self.fxck_oo:
                    page_son_list = page_son_list[:300]

                for url in page_son_list:
                    if url not in seen:
                        seen.add(url)
                        obj_list.append(executor.submit(is_video_, url))

                del page_son_list[:]

                for future in as_completed(obj_list):
                    page_depath = future.result()
                    if page_depath:
                        result = page_depath.is_viedo()
                        result["depath"] = path
                        result["host"] = self.host

                        try:
                            # Pages whose title marks an error/parked site are
                            # scored but not persisted.
                            title = result['title']
                            raise_error_site_name(title)
                            self.db.add('WEB_search_info', result)
                        except Exception:
                            pass
                        site_Confidences.append(result["Confidence"])
                        page_son_list.extend(self.get_all_url(page_depath.html, page_url=result['url']))

        # Site score: sum of the five most confident pages.
        site_Confidences.sort(reverse=True)
        self.site_Confidence += sum(site_Confidences[:5])
        page_num = len(site_Confidences)

        result = {'site_Confidence': self.site_Confidence, 'page_num': page_num}
        return result

    def __del__(self):
        # Best-effort banner; attributes may be missing if __init__ failed partway.
        try:
            if self.page_check:
                print('*' * 150)
                print('*' * 50 + ' ' * 5 + self.index_url + '  ' + '站点检测完成' + ' ' * 5 + '*' * 55)
                print('*' * 150)
        except Exception:
            print('*' * 150)
            print('*' * 50 + ' ' * 5 + self.index_url + '  ' + '站点访问失败' + ' ' * 5 + '*' * 55)
            print('*' * 150)


def site_checker(item, depath=3, fxck_oo=False):
    """Run a full site check for one site record.

    Returns the {'site_Confidence', 'page_num'} dict on success, or None when
    the check raised (the exception is printed, not propagated).
    """
    try:
        checker = site_check(item, depath=depath, fxck_oo=fxck_oo)
        return checker.site_have_video()
    except Exception as err:
        print(err)


if __name__ == '__main__':
    # print(is_video_('http://www.evawheel.com/eva/46770746.html').is_viedo())

    # home_page = 'http://www.dancebook.com.cn'
    # r = db.find('WEB_urls', {"home_page": home_page})[0]
    # print(r)
    # site_checker(r)

    # Ad-hoc smoke test: run a full site check against a hard-coded sample record.
    site_checker({
        "host": "jubt4.one",
        "title": "电影天堂_百度云盘_迅雷下载_电影大全",
        "icp": "粤ICP备15055096号",
        "license": "未检测到",
        "PROVINCE_NAME": "香港",
        "CITY_NAME": "香港",
        "IP": "47.75.148.205",
        "record_time": "2020-03-13 15:43:37",
        "home_page": "https://jubt4.one/cn/index.html",
        "site_Confidence": ""
    })
