import hashlib
import io
import os
import re
import time
from datetime import datetime

import requests
import urllib3
from PIL import Image
from bson import ObjectId

from common.utils import is_linux
from database.db import get_undetail_movie, save_movie_detail, get_movie, delete_movie, get_categories, \
    movieCol, save_video_online, videoCol
from database.redis_con import redis_con
from downloader import download
from online_check import check_link

# Detail-page field labels scanned for in the site's <meta description>
# content (Chinese metadata keys: release date, ratings, cast, etc.).
# NOTE(review): '年代' and 'IMDB评分' appear here but have no KEY_MAP entry,
# so their matches are dropped by extract_detail's KeyError handling.
KEYS = [
    '上映日期', 'IMDb评分', '豆瓣评分', '片长', '导演', '主演', '字幕', '语言', '类型', '地区', '年代', '类别',
    'IMDB评分', '国家', '制片国家/地区'
]

# Maps a source-site label to the field name stored on the movie document.
# Several labels deliberately collapse onto one field (e.g. '地区', '国家'
# and '制片国家/地区' all map to 'country').
KEY_MAP = {
    '上映日期': 'release_date',
    'IMDb评分': 'imdb',
    '豆瓣评分': 'douban',
    '片长': 'duration',
    '导演': 'director',
    '主演': 'actors',
    '字幕': 'subtitle',
    '语言': 'language',
    '类型': 'categories',
    '类别': 'categories',
    '地区': 'country',
    '国家': 'country',
    '制片国家/地区': 'country'
}


def md5(x):
    """Return the hex MD5 digest of the UTF-8 encoding of string *x*."""
    # A `def` instead of an assigned lambda (PEP 8 E731) for clearer
    # tracebacks and introspection.
    return hashlib.md5(x.encode('utf-8')).hexdigest()


def extract_onlines(movie, resp):
    """Scrape the online-play sources from a movie detail page.

    For each "播放地址" entry, fetch the play page, pull the embedded
    <iframe> video source, validate it via check_link(), and store valid
    entries with save_video_online(). Already-stored links count as valid.

    :param movie: movie document (must contain at least '_id' and 'name')
    :param resp: downloaded/parsed detail page response
    :return: True if at least one valid (or previously saved) video link exists
    """
    has_valid_video_link = False
    base_url = resp.url
    site = re.findall('(https?://.*?)/', base_url)[0]
    play_urls = resp.xpath('//div[@class="widget box row"]/h3[contains(text(),"播放地址")]/following::a[@class="lBtn"]/@href').extract()
    # Relative hrefs need the site prefix prepended.
    play_urls = [x if x.startswith('http') else f'{site}{x}' for x in play_urls]
    titles = resp.xpath('//h3[contains(text(),"播放地址")]/following::a[@class="lBtn"]/@title').extract()
    for i, play_url in enumerate(play_urls):
        title = titles[i]
        print('正在抓取', title)
        try:
            # Use a distinct name: the original clobbered the `resp`
            # parameter here.
            play_resp = download(play_url)
            video_source = play_resp.xpath('//iframe/@src').extract()[0]
            print('video:', video_source)
        except Exception:  # narrowed from a bare except: download/layout failure
            continue
        # Skip links already stored for this movie, but still count them.
        if videoCol.find_one({'$and': [{'video_link': video_source}, {'movie_id': str(movie['_id'])}]}):
            has_valid_video_link = True
            continue
        valid, source = check_link(video_source)
        m3u8 = ''
        # `src` instead of the original `url`, which shadowed the loop variable.
        for src in source:
            if src.endswith('m3u8'):
                m3u8 = src
                print(m3u8)
        if valid:
            online = {
                'title': title,
                'video_link': video_source,
                'source': source,
                'order': i,
                'movie': movie['name'],
                'movie_id': str(movie['_id']),
                'createdAt': datetime.now(),
                'md5': md5(video_source),
                'm3u8': m3u8
            }
            save_video_online(online)
            has_valid_video_link = True
    return has_valid_video_link


def extract_video(detail, url):
    """Download the play page at *url* and harvest online sources for *detail*."""
    page = download(url)
    return extract_onlines(detail, page)


def parse_intro(resp):
    """Extract the plot-introduction text from a detail page.

    Collects the lines between a '简介'/'剧情介绍' heading and the
    '下载地址' heading. On failure, records the URL in the Redis set
    'intro_fail' and returns ''.
    """
    raw = resp.xpath('//div[@id="endText"]')[0].xpath('string(.)').extract()[0]
    lines = [ln for ln in raw.replace('\u3000', '').split('\r\n') if ln.strip()]
    intros = []
    in_intro = False
    for line in lines:
        if not in_intro and ('简介' in line or '剧情介绍' in line):
            in_intro = True
        elif '下载地址' in line:
            in_intro = False
        elif in_intro:
            intros.append(line)
    if not intros:
        redis_con.sadd('intro_fail', resp.url)
        return ''
    return '\n'.join(intros).replace('&middot;', '·')


def extract_detail(resp):
    """Parse the <meta name="description"> metadata into a field dict.

    The description content is a '<br />'-separated list of '◎key value'
    segments; each KEYS label found is normalized and stored under its
    KEY_MAP field name. The plot introduction is added as 'intro'.

    :param resp: downloaded/parsed detail page response
    :return: dict of extracted fields
    :raises ValueError: (carrying the page URL) when the meta tag is missing
    """
    try:
        content = resp.xpath('//meta[@name="description"]/@content').extract()[0]
    except Exception:  # narrowed from a bare except: (caught KeyboardInterrupt too)
        raise ValueError(resp.url)
    content = content.replace('\u3000', '')
    texts = content.split('<br />')
    texts = [x.replace('◎', '').strip() for x in texts if x.replace('◎', '').strip()]
    infos = {}
    for key in KEYS:
        # The last segment is usually the intro blob, so skip it.
        for text in texts[:-1]:
            try:
                if value := re.findall(f'{key}(.*)', text):
                    value = re.sub(r'[∶:]+', ' ', value[0]).strip().replace('&middot;', '·')
                    if '评分' in key:
                        value = re.findall(r'([\d.]+?/10)', value)[0]
                    if '日期' in key:
                        value = re.findall(r'(\d{4}-\d{2}-\d{2})', value)[0]
                    infos[KEY_MAP[key]] = value
                    break
            except (IndexError, KeyError):
                # IndexError: rating/date sub-pattern absent in the value.
                # KeyError: labels like '年代'/'IMDB评分' are in KEYS but
                # have no KEY_MAP entry (narrowed from a bare except:).
                continue

    infos['intro'] = parse_intro(resp)
    return infos


def parse_category(url):
    """Map a detail-page URL path prefix to its category name.

    :param url: page URL like https://www.xxx6v.com/dlz/....html
    :return: '国剧' for /dlz/, '日韩' for /rj/, '欧美剧' for /mj/, else None
    """
    # The dots were previously unescaped ('www.' etc.), so '.' matched any
    # character; escape them and use raw strings for the patterns.
    rules = (
        (r'https://www\.[0-9a-z]+\.[a-z]+/dlz/.*html', '国剧'),
        (r'https://www\.[0-9a-z]+\.[a-z]+/rj/.*html', '日韩'),
        (r'https://www\.[0-9a-z]+\.[a-z]+/mj/.*html', '欧美剧'),
    )
    for pattern, category in rules:
        if re.match(pattern, url):
            return category
    return None


def extract_title(resp):
    """Return the page's first <h1> text, or None when the page has none."""
    heading = resp.xpath('//h1/text()')
    return heading.extract_first()


def _extract_links(resp):
    """Collect download/online link dicts from the page's tables.

    Each <td> either holds the <a> link directly or wraps it in a <p>;
    both layouts are tried, and cells matching neither are skipped.
    """
    links = []
    for td in resp.xpath('//div[@id="main"]/*//table/*//td'):
        for prefix in ('', 'p/'):
            try:
                links.append({
                    'type': td.xpath(f'{prefix}text()').extract()[0],
                    'title': td.xpath(f'{prefix}a/text()').extract()[0],
                    'link': td.xpath(f'{prefix}a/@href').extract()[0],
                })
                break
            except IndexError:  # narrowed from a bare except:
                continue
    return links


def parse_detail(m):
    """Crawl and persist the full detail record for movie *m*.

    Deletes the movie when its URL is invalid or the title marks a
    non-movie page (galas, rankings, compilations, ...); otherwise parses
    metadata, images, download links and online-play links, then saves
    the detail document.
    """
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '解析', m['title'], m['url'], m['date'])
    url = m['url']
    if not url.startswith('http'):
        print('删除无效的电影', url)
        delete_movie(m)
        return
    try:
        resp = download(url)
    except Exception:  # narrowed from a bare except:
        return
    title = extract_title(resp)
    if title:
        m['title'] = title
    else:
        # BUG fix: a page without <h1> left `title` as None and the
        # membership tests below raised TypeError; fall back to the
        # stored title.
        title = m['title']
    for word in ['典礼', '晚会', '排行榜', '推荐', '榜单', '合集', '口碑', '演唱会']:
        if word in title:
            delete_movie(m)
            print('删除', title)
            return
    # Titles without 《书名号》 around a name are not movie pages.
    if not re.findall('《(.*)》', title):
        delete_movie(m)
        print('删除', title)
        return

    detail = m
    detail.update(extract_detail(resp))
    # Parse article-body images; the first becomes the cover.
    images = resp.xpath('//div[@id="endText"]/*//img/@src').extract()

    # Parse download/online links from the page tables.
    links = _extract_links(resp)

    # Split the links and count play pages that yielded a valid video.
    online = 0
    download_links = []
    online_links = []
    for link in links:
        if '网盘' in link['type']:
            continue
        if '在线观看' in link['type'] or '在线地址' in link['type']:
            online_links.append(link)
            try:
                online += 1 if extract_video(detail, link['link']) else 0
            except Exception:  # narrowed from a bare except:
                continue
        else:
            download_links.append(link)

    detail.update({
        # BUG fix: the original dict listed 'cover' twice ('' vs None
        # fallback) — the later None silently won; keep one '' fallback.
        'cover': images[0] if images else '',
        'online_links': online_links,
        'images_url': images,
        'url': url,
        'links': download_links,
        'valid': 1 if m.get('valid') == 1 or online > 0 else 0,
        'updated': 0
    })
    save_movie_detail(detail)


def crawl_movie_image(resp):
    """Download the page's cover image and save it plus a thumbnail.

    The file name is the MD5 of the page URL; files go under the server's
    static dirs on Linux, otherwise under ./download.

    :param resp: downloaded/parsed detail page response
    :return: the base filename (no extension), or None if the download failed
    """
    image_path = './download/images'
    thumb_path = './download/thumbnails'
    image_url = resp.xpath('//div[@id="main"]//img/@src').extract()[0]
    filename = md5(resp.url)  # md5 already returns a str
    image_resp = download_image(image_url)
    if not image_resp:
        # BUG fix: this returned `None, None` while the success path
        # returns a single value; keep the arity consistent.
        return None
    os.makedirs(image_path, exist_ok=True)
    os.makedirs(thumb_path, exist_ok=True)
    # BUG fix: `filename` was computed but never used — every cover was
    # written to the same literal file name, overwriting the previous one.
    if is_linux():
        filepath = f'/root/movie_server/static/images/{filename}.jpg'
    else:
        filepath = f'./download/images/{filename}.jpg'
    with open(filepath, 'wb') as f:
        f.write(image_resp.content)
        print('保存封面', filepath)
    # Distinct name: the original clobbered the `thumb_path` directory var.
    if is_linux():
        thumb_file = f'/root/movie_server/static/thumbnails/{filename}.jpg'
    else:
        thumb_file = f'./download/thumbnails/{filename}.jpg'
    create_thumbnails(image_resp.content, thumb_file)
    print('保存缩略图', thumb_file)
    return filename


def create_thumbnails(content, outputfile):
    """Shrink raw image bytes to fit within 126x180 and save to *outputfile*."""
    buffer = io.BytesIO(content)
    thumb = Image.open(buffer)
    thumb.thumbnail((126, 180))
    thumb.save(outputfile)


def download_image(img, retry=0):
    """Fetch the image at *img* over HTTP, with up to 3 attempts in total.

    :param img: image URL
    :param retry: attempts already used (kept for backward compatibility
        with callers that passed it explicitly)
    :return: the successful requests.Response, or '' after all attempts fail
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
    }
    # Iterative retry loop replaces the original self-recursion (which also
    # bound the caught exception to an unused variable).
    for _ in range(3 - retry):
        try:
            resp = requests.get(img, timeout=30, headers=headers)
        except Exception:
            continue
        if resp.status_code == 200:
            return resp
    return ''


def download_images(images):
    """Download the first image of *images*; return '' for an empty list."""
    if not images:
        return ''
    return download_image(images[0])


def parse_movie(m, retry=0):
    """Parse one movie record with up to 3 attempts.

    Sleeps for 10 minutes when there is nothing to crawl so the polling
    caller backs off; drops gala/ranking/compilation entries outright.

    :param m: movie document (or falsy when the queue is empty)
    :param retry: attempts already used
    """
    if retry == 3:
        return
    try:
        if not m:
            print('没有需要爬取的链接')
            time.sleep(600)
            return
        for key in ['颁奖典礼', '晚会', '排行榜', '佳片推荐', '榜单', '合集', '口碑', '演唱会']:
            if key in m['title']:
                delete_movie(m)
                time.sleep(5)
                return
        parse_detail(m)
    except Exception as e:
        # The error was previously swallowed silently; surface it before
        # retrying so failures are diagnosable.
        print('解析失败, 重试', retry + 1, e)
        parse_movie(m, retry + 1)


def run_detail_spider(debug=False):
    """Poll the database forever, parsing any movie that lacks details.

    Sleeps 5 minutes between polls when the queue is empty.
    """
    urllib3.disable_warnings()
    while True:
        movie = get_undetail_movie(debug)
        if not movie:
            time.sleep(300)
            continue
        parse_movie(movie)


def parse(id):
    """Look up the movie by its ObjectId string and parse its detail page."""
    target = get_movie(id=str(ObjectId(id)))
    parse_movie(target)


def extract_tip(title):
    """Reduce a page title to its leading "tip" text.

    Takes everything before the closing '》', strips the '更新'/'高清'
    markers and any square brackets.

    :param title: raw page title, e.g. '[高清更新]《电影名》...'
    :return: the cleaned leading text
    """
    # BUG fix: str.split's second argument is maxsplit (an int); the
    # original passed '' and raised TypeError on every call.
    content = title.split('》', 1)[0]
    content = content.replace('更新', '').replace('高清', '')
    return re.sub(r'[\]\[]', '', content)


def crawl(url, m=None):
    """Crawl a single detail page and merge the parsed fields into *m*.

    Deletes the movie for non-movie pages, then parses metadata, images,
    links and resolves the category / main_category fields.

    :param url: detail page URL
    :param m: existing movie document to update (a fresh dict by default)
    """
    # BUG fix: the original used a mutable default argument (m={}),
    # shared across calls.
    if m is None:
        m = {}
    try:
        resp = download(url)
    except Exception:  # narrowed from a bare except:
        return
    title = extract_title(resp)
    if title:
        m['title'] = title
    else:
        # BUG fix: a page without <h1> left `title` as None and the
        # membership tests below raised TypeError.
        title = m.get('title', '')
    for word in ['典礼', '晚会', '排行榜', '推荐', '榜单', '合集', '口碑', '演唱会']:
        if word in title:
            delete_movie(m)
            print('删除', title)
            return
    # Titles without 《书名号》 around a name are not movie pages.
    # (The original also computed an unused `tip = extract_tip(title)`.)
    if not re.findall('《(.*)》', title):
        delete_movie(m)
        print('删除', title)
        return

    detail = m
    detail.update(extract_detail(resp))
    # Parse article-body images; the first becomes the cover.
    images = resp.xpath('//div[@id="endText"]/*//img/@src').extract()
    local_image = download_images(images)

    # Parse links: each <td> holds the <a> directly or wraps it in a <p>.
    links = []
    for td in resp.xpath('//div[@id="main"]/*//table/*//td'):
        for prefix in ('', 'p/'):
            try:
                links.append({
                    'type': td.xpath(f'{prefix}text()').extract()[0],
                    'title': td.xpath(f'{prefix}a/text()').extract()[0],
                    'link': td.xpath(f'{prefix}a/@href').extract()[0],
                })
                break
            except IndexError:  # narrowed from a bare except:
                continue

    # Split the links and count play pages that yielded a valid video.
    online = 0
    download_links = []
    online_links = []
    for link in links:
        if '在线观看' in link['type'] or '在线地址' in link['type']:
            online_links.append(link)
            try:
                online += 1 if extract_video(detail, link['link']) else 0
            except Exception:
                # BUG fix: the original did `raise e` followed by an
                # unreachable `continue`, aborting the whole crawl on one
                # bad link; skip it instead, matching parse_detail.
                continue
        else:
            download_links.append(link)

    # Resolve the category: prefer the one already on the record, else
    # match by URL prefix or by the category name appearing in the title.
    category = m['category']
    if not category:
        for cate in get_categories():
            if cate['category'] == parse_category(m['url']) or cate['category'] in m['title']:
                category = cate
                break
    if category and category['category'] == '动画':
        # Animation is a TV series when it looks like episodic content.
        if '全集' in detail['title'] or re.findall('第.*[季集]', detail['title']) or '更新' in detail['title']:
            detail['main_category'] = 'tv'
        else:
            detail['main_category'] = 'movie'
    elif category and category['category'] in ['日韩剧', '欧美剧', '国剧', '纪录']:
        detail['main_category'] = 'tv'
    else:
        detail['main_category'] = 'movie'
    detail['sub_category'] = category['category'] if category else None

    if not detail['category'] and category:
        detail['category'] = category
    elif '全集' in detail['title'] or re.findall('第.*[季集]', detail['title']) or '更新' in detail['title']:
        detail['main_category'] = 'tv'
    else:
        detail['main_category'] = 'movie'
    detail.update({
        'cover': images[0] if images else '',
        'online_links': online_links,
        'images_url': images,
        'image': local_image,
        'url': url,
        'links': download_links,
        'valid': 1 if m.get('valid') == 1 or online > 0 else 0,
        'updated': 0
    })
    # NOTE(review): unlike parse_detail, the built detail dict is never
    # persisted here (preserved as-is) — confirm whether
    # save_movie_detail(detail) should be called.


if __name__ == '__main__':
    # parse()
    # Runs the polling spider forever; True enables debug mode in
    # get_undetail_movie.
    run_detail_spider(True)
    #
    # crawl('http://www.hao6v.com/dy/2022-05-06/39448.html')
