import json
import re
import threading
import time
from queue import Queue

import random

import datetime
import pymysql
import requests
from lxml import etree
from openpyxl import Workbook
from requests.packages.urllib3.exceptions import InsecureRequestWarning

# Suppress InsecureRequestWarning: requests below use verify=False deliberately.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


# Open a connection to the local scraping database.
def get_connect():
    """Return a new pymysql connection to the `demonone` database."""
    params = dict(
        host='localhost',
        port=3306,
        user='root',
        passwd='123456',
        db='demonone',
        charset='utf8mb4',
    )
    return pymysql.Connect(**params)


# Obtain a cursor from an already-open connection.
def get_cursor(connect):
    """Return a fresh cursor created from *connect*."""
    return connect.cursor()


# Insert one film's metadata row; duplicates (same film_id) are skipped.
def insert_film_info(film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce):
    """Write one row into xcyy_film_info.

    Returns True on a successful insert, False when the film already exists
    or any database error occurs. The connection is always closed.
    """
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        # Duplicate check: a row keyed by film_id already written earlier.
        cursor.execute("SELECT * FROM xcyy_film_info WHERE film_id = %s", film_id)
        if cursor.fetchone() is not None:
            print("重复写入：" + film_name)
            return False
        sql_insert = "INSERT INTO xcyy_film_info (film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce,film_insert_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        row = (film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce, datetime.datetime.now())
        cursor.execute(sql_insert, row)
        connect.commit()
        return True
    except Exception as ex:
        print('视频信息写入失败：{},sql_insert:{},异常信息：{}'.format(film_name, sql_insert, str(ex)))
        return False
    finally:
        # Release database resources no matter which path was taken.
        cursor.close()
        connect.close()


# Insert one m3u8 play record; duplicate (film_id, url) pairs are skipped.
def insert_film_play_m3u8(film_id, film_play_url, film_play_line, film_episode, play_url_m3u8):
    """Write one row into xcyy_film_play_m3u8.

    Returns True on a successful insert, False for duplicates or DB errors.
    """
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        now_time = datetime.datetime.now()
        # Duplicate check on the (film_id, m3u8 url) pair.
        cursor.execute(
            "SELECT * FROM xcyy_film_play_m3u8 WHERE film_id = %s AND play_url_m3u8 = %s",
            (film_id, play_url_m3u8),
        )
        existing = cursor.fetchone()
        film_episode = '第{}集'.format(film_episode)
        if existing is not None:
            print("重复写入：{},{}".format(str(film_episode), play_url_m3u8))
            return False
        sql_insert = "INSERT INTO xcyy_film_play_m3u8 (film_id,film_play_url,film_play_line,film_episode,play_url_m3u8,film_insert_time) VALUES (%s, %s, %s, %s, %s, %s)"
        cursor.execute(sql_insert, (film_id, film_play_url, film_play_line, film_episode, play_url_m3u8, now_time))
        connect.commit()
        return True
    except Exception as ex:
        print('写入失败：{},sql_insert：{},异常信息：{}'.format(film_id, sql_insert, str(ex)))
        return False
    finally:
        # Release database resources no matter which path was taken.
        cursor.close()
        connect.close()


# Skip data already collected within the last month. film_id: the film's id.
def collect_time_by_one_month(film_id):
    """Return True when *film_id* was collected less than 31 days ago.

    Returns False when the film has never been written, or when its stored
    insert time is at least a month old. On any query/parse error it returns
    True (conservatively treat the film as recently collected, as before).
    """
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_select = "SELECT * FROM xcyy_film_info WHERE film_id = %s"
    try:
        cursor.execute(sql_select, film_id)
        res = cursor.fetchone()
        if res is None:
            return False  # no row for film_id: never collected
        # Map column names onto the fetched row values.
        result = dict(zip([k[0] for k in cursor.description], res))
        insert_time = result.get('film_insert_time')
        # BUG FIX: pymysql returns DATETIME columns as datetime objects, but
        # the original unconditionally called time.strptime() on the value,
        # which raised TypeError; the except clause then returned True, so
        # every previously stored film was skipped forever.
        if isinstance(insert_time, str):
            insert_time = datetime.datetime.strptime(insert_time, '%Y-%m-%d %H:%M:%S.%f')
        # Collected within the last 31 days?
        return (datetime.datetime.now() - insert_time) < datetime.timedelta(days=31)
    except Exception as ex:
        print('查询失败：{},sql_insert：{},异常信息：{}'.format(film_id, sql_select, str(ex)))
        return True
    finally:
        # Release database resources no matter which path was taken.
        cursor.close()
        connect.close()


# Record a request/parse failure in the error-log table.
def insert_error_log(film_id, film_name, film_play_url, error_message):
    """Append one row to xcyy_error_log; failures are printed, never raised."""
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        sql_insert = "INSERT INTO xcyy_error_log(film_id,film_name,film_play_url,error_message,insert_time) VALUES (%s, %s, %s, %s, %s)"
        row = (film_id, film_name, film_play_url, error_message, datetime.datetime.now())
        cursor.execute(sql_insert, row)
        connect.commit()
    except Exception as ex:
        print('写入失败：{},sql_insert：{},异常信息：{}'.format(film_name, sql_insert, str(ex)))
    finally:
        # Release database resources no matter which path was taken.
        cursor.close()
        connect.close()


# Perform a GET request. is_text: True -> response.text, False -> UTF-8 decoded bytes.
def get_request_result(url, param=None, is_text=False, timeout=15):
    """Fetch *url* and return its stripped body text.

    param: a pre-encoded query string appended as '?<param>' (not a dict).
    is_text: True returns response.text; False decodes response.content as UTF-8.
    timeout: seconds before the request aborts (new, backward-compatible default).
    Returns the literal sentinel '请求异常' on any failure — callers rely on it.
    """
    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'
    }
    try:
        if param is not None:
            url = url + '?' + param
        # BUG FIX: the original passed no timeout, so a stalled server could
        # hang the worker thread forever.
        response = requests.get(url=url, headers=header, verify=False, timeout=timeout)
        # Randomized 0.5-1.5s pause to throttle the request rate.
        sleep_time = random.random() + 0.5
        time.sleep(sleep_time)
        if is_text:
            return response.text.strip()
        return response.content.decode('utf-8').strip()
    except Exception as e:
        print('请求url：{}，请求参数：{}，异常信息：{}'.format(url, param, str(e)))
        return '请求异常'


# Collect the href value of every matching <a> tag on the site's home page.
def get_html_url():
    """Fetch the home page and return the absolute URLs of its nav links."""
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'

    page = get_request_result(url=host, is_text=True)
    tree = etree.HTML(page)
    paths = tree.xpath('//div/div/div/div/div/a/@href')
    titles = tree.xpath('//div/div/div/div/div/h3/a/text()')
    hrefs = [host + path for path in paths]
    print(hrefs + titles)
    return hrefs


# Enqueue every paginated listing URL for each known category.
# Page counts: {TV series: 858, movies: 1921, variety: 420, anime: 442}
def get_url_byType(_hrefs, type_queue):
    """For each known category href in *_hrefs*, put every paginated listing
    URL onto *type_queue* as '<page_url>@<category_name>'.

    The page number is spliced in before the trailing '---.html' of the
    category URL. Unknown hrefs are skipped.
    """
    # Known category listing pages -> (max page count, display name).
    page_map = {
        'https://www.cu518.com/xingchen_show/dianshiju-----------.html': (858, '电视剧'),
        'https://www.cu518.com/xingchen_show/dianying-----------.html': (1921, '电影'),
        'https://www.cu518.com/xingchen_show/zongyi-----------.html': (420, '综艺'),
        'https://www.cu518.com/xingchen_show/dongman-----------.html': (442, '动漫'),
    }
    for href in _hrefs:
        if href not in page_map:
            # BUG FIX: the original if/elif chain fell through for unknown
            # hrefs, reusing the previous iteration's max_page and category
            # name and enqueueing wrong URLs. Unknown hrefs are now skipped.
            continue
        max_page, film_type_name = page_map[href]
        prefix = href[:-8]
        suffix = href[-8:]
        for page in range(1, max_page + 1):
            # The '@<name>' suffix carries the category name to consumers.
            type_queue.put(prefix + str(page) + suffix + '@' + film_type_name)


# Consume category listing URLs and enqueue each film's detail-page URL.
def get_url_detail(type_queue, detail_queue, thread):
    """Worker: pull '<type_url>@<type_name>' items from type_queue, fetch each
    listing page, and put '<detail_url>@<type_name>' onto detail_queue for
    every film link found.

    thread: the producer filling type_queue; the loop runs while the queue is
    non-empty or that producer is alive.
    NOTE(review): type_queue.get() blocks with no timeout — if the producer
    dies while the queue is momentarily empty this worker can hang; confirm
    this shutdown behavior is intended.
    """
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'
    while ((not type_queue.empty()) or thread.is_alive()):
        temp_type_url = type_queue.get()
        type_queue.task_done()
        # response = requests.get(url=type_url).text
        type_url = temp_type_url.split('@')[0]  # category listing URL
        film_type_name = temp_type_url.split('@')[1]  # category display name

        response = get_request_result(url=type_url, is_text=True)
        time.sleep(0.5)

        _lxml = etree.HTML(response)
        detail_url = _lxml.xpath('//ul/li/div/a/@href')
        for i in detail_url:
            details = host + i + '@' + film_type_name
            #print(details)
            detail_queue.put(details)


# Consume film detail-page URLs: persist film metadata plus its m3u8 play URLs.
def insert_play_url(detail_queue,thread_list):
    """Worker: pull '<detail_url>@<type_name>' items from detail_queue, parse
    each film's detail page, store the film row, then fetch every play page
    and store the m3u8 address extracted from it.

    thread_list: the stage-2 producer threads filling detail_queue.
    NOTE(review): `flag` (any producer alive) is computed once BEFORE the
    loop and never refreshed, and detail_queue.get() blocks without a
    timeout — confirm the shutdown behavior is intended. `flag` and
    `film_id` are also rebound inside the loop body.
    """
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'
    
    # True while at least one detail-url producer thread is still running.
    flag = False
    for thread in thread_list:
        if(thread.is_alive()):
            flag = True
            break

    while ((not detail_queue.empty()) or flag):
        film_id = 0  # film id
        film_name = ''  # film title
        detail_url = ''  # film detail-page url
        try:
            temp_detail_url = detail_queue.get()
            detail_queue.task_done()
            detail_url = temp_detail_url.split('@')[0]  # detail-page url
            film_type_name = temp_detail_url.split('@')[1]  # category display name
            temp_id = detail_url.split('/')[4]
            film_id = int(temp_id.split('.')[0])  # film id parsed from the url

            # response = requests.get(url=detail_url).text
            response = get_request_result(url=detail_url, is_text=True)
            time.sleep(0.5)

            _lxml = etree.HTML(response)
            film_name = _lxml.xpath('//div/h1/text()')  # film title
            film_protagonist = _lxml.xpath('//p/a/text()')  # starring actors
            film_introduce = re.findall('<span class="detail-content" style="display: none;">(.*)</span>', response)  # synopsis
            film_img = _lxml.xpath('//div/a/img/@data-original')[0]  # cover image

            # Genre / region / year scraped from labelled spans in the page.
            temp_str = re.findall('</span>(.*)<span class="split-line">', response)
            type_str = re.findall('<span class="text-muted">(.*)<span class="split-line">', response)[0].replace('\t', '').replace('</span>', '')
            film_type = str(type_str).split('：')[1]
            area_str = re.findall('<span class="text-muted hidden-xs">(.*)<span class="split-line">', response)[0].replace('\t', '').replace('</span>', '')
            film_area = str(area_str).split('：')[1]
            year_str = re.findall('<span class="text-muted hidden-xs">(.*)', response)[1].replace('\t', '').replace('</span>', '')
            film_year = str(year_str).split('：')[1].replace('\r', '')

            film_name = film_name[0]
            protagonist = ''
            for pt in film_protagonist:
                protagonist += pt + ','
            protagonist = protagonist[0:len(protagonist) - 6]
            if len(film_introduce) == 0:
                film_introduce.append('暂无简介')
            film_introduce = film_introduce[0]

            # Persist the film's metadata row.
            bool_one_month = collect_time_by_one_month(film_id)
            if bool_one_month:
                print('视频id：{}，视频名字：{}{}该资源一个月内采集过{}'.format(film_id, film_name, ('*' * 15), ('*' * 15)))
                continue  # collected within the last month: skip this film
            bool_info = insert_film_info(film_id, film_name, film_type, film_area, film_year, film_img, '电影', detail_url, protagonist, film_introduce)
            if bool_info:
                print('视频信息写入成功：{}'.format(film_name))
            else:
                print('视频信息写入失败：{}'.format(film_name))
                continue  # metadata insert failed: move on to the next item
            # Play-line headings and per-episode play-page links.
            play_line_cn = _lxml.xpath('//div/h3/text()')
            detail_play_url = _lxml.xpath('//div/ul/li/a/@href')
            count = 0
            for url in detail_play_url:
                m3u8_url = ''  # play page that embeds the m3u8 address
                try:
                    # Only links containing 'play' are playable-episode pages.
                    flag = 'play' in url
                    if flag:
                        temp = url.split('/')[2]
                        film_id = int(temp.split('-')[0])
                        play_line = int(temp.split('-')[1])
                        episode = str(temp.split('-')[2]).split('.')[0]  # episode number
                        m3u8_url = host + url

                        # Extract the .m3u8 address embedded in the play-page HTML.
                        response_m3u8_temp = get_request_result(url=m3u8_url, is_text=True)
                        time.sleep(0.5)
                        # res = json.loads(re.findall('var player_x10d26=(.+);n', response.body.decode('utf-8'))[0])
                        response_m3u8 = response_m3u8_temp.replace('}', '};')
                        result_temp = re.findall('var player_x10d26=(.*);', response_m3u8)
                        result = str(result_temp).replace('\'', '')
                        # print(result)
                        result_data = json.loads(result)
                        play_url_m3u8 = result_data[0].get('url').replace('\\', '')
                        play_url_id = result_data[0].get('id')
                        # print(play_url_m3u8)
                        # id,film_id,film_name,film_type,film_area,
                        # film_year,film_img,film_type_url,film_detail_url,
                        # film_protagonist,film_introduce
                        # Persist the m3u8 record for this episode.
                        bool_m3u8 = insert_film_play_m3u8(film_id, m3u8_url, play_line, episode, play_url_m3u8)
                        if bool_m3u8:
                            print('写入成功,电影名字：{},线路：{},集数：第{}集,地址：{}'.format(film_name, play_line, episode, play_url_m3u8))
                            count += 1
                        else:
                            print('写入失败,电影名字：{},线路：{},集数：第{}集,地址：{}'.format(film_name, play_line, episode, play_url_m3u8))
                except Exception as ex:
                    print('{}电影名字：{}，url：{}，异常信息：{}{}'.format(('*' * 15), film_name, m3u8_url, str(ex), ('*' * 15)))
                    insert_error_log(film_id, film_name, m3u8_url, str(ex))  # log the request/parse failure
                    continue
            print('总行数：{}'.format(count))

        except Exception as e:
            print('{}电影名字：{}，detail_url：{}，异常信息：{}{}'.format(('*' * 15), film_name, detail_url, str(e), ('*' * 15)))
            insert_error_log(film_id, film_name, detail_url, str(e))
            continue



if __name__ == '__main__':
    print('采集电影start。。。')

    # Worker counts: listing-page fetchers and play-url/m3u8 fetchers.
    NUM_WORKERS = 5
    NUM_WORKERS2 = 20

    _hrefs = get_html_url()
    # Thread-safe FIFO queues shared between pipeline stages.
    # Paginated category listing URLs.
    type_url_queue = Queue(maxsize=50000)

    # Film detail-page URLs.
    detail_url_queue = Queue(maxsize=200000)

    # Stage 1: one producer thread generates every paginated category URL.
    t1 = threading.Thread(target=get_url_byType, args=(_hrefs, type_url_queue))
    t1.start()

    # Stage 2: fetch listing pages and extract detail-page URLs.
    thread_list2 = []
    for i in range(NUM_WORKERS):
        t2 = threading.Thread(target=get_url_detail, args=(type_url_queue, detail_url_queue, t1))
        thread_list2.append(t2)

    # Stage 3: fetch detail pages, parse and persist m3u8 play URLs.
    thread_list3 = []
    for i in range(NUM_WORKERS2):
        t3 = threading.Thread(target=insert_play_url, args=(detail_url_queue, thread_list2))
        thread_list3.append(t3)

    for t in thread_list2:
        t.start()
    for t in thread_list3:
        t.start()

    # Wait for every stage to finish.
    t1.join()
    # BUG FIX: the original joined only `t2` — the last stage-2 thread left
    # over from the creation loop — so the other stage-2 workers were never
    # joined. Join every stage-2 worker here.
    for t in thread_list2:
        t.join()
    for t in thread_list3:
        t.join()

    print('采集电影end。。。')
