import json
import re
import threading
import time
from queue import Queue

import random
import datetime
import pymysql
import requests
from lxml import etree
from requests.packages.urllib3.exceptions import InsecureRequestWarning

# Disable InsecureRequestWarning — requests below use verify=False deliberately
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


# Open a connection to the local MySQL database.
def get_connect():
    """Create and return a fresh pymysql connection to the `demonone` database."""
    db_params = dict(
        host='localhost',
        port=3306,
        user='root',
        passwd='123456',
        db='demonone',
        charset='utf8mb4',
    )
    return pymysql.Connect(**db_params)


# Obtain a cursor from an open connection.
def get_cursor(connect):
    """Return a new cursor created from the given pymysql connection."""
    return connect.cursor()


# Write one film's metadata to the database.
def insert_film_info(film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce):
    """Insert one row into xcyy_film_info.

    Returns True on a successful insert; returns False when a row with the
    same film_id already exists or when any database error occurs.
    """
    # Open a dedicated connection/cursor for this call
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        now_time = datetime.datetime.now()
        # Duplicate check by film_id — skip if the film is already stored
        sql_select = "SELECT * FROM xcyy_film_info WHERE film_id = %s"
        data_select = film_id
        cursor.execute(sql_select, data_select)
        rows = cursor.fetchone()
        if rows is not None:
            print("重复写入：" + film_name)
            return False
        # Insert the new film record, stamped with the current time
        sql_insert = "INSERT INTO xcyy_film_info (film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce,film_insert_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        data = (film_id, film_name, film_type, film_area, film_year, film_img, film_type_name, film_detail_url, film_protagonist, film_introduce, now_time)
        cursor.execute(sql_insert, data)
        connect.commit()
        return True
    except Exception as ex:
        print('视频信息写入失败：{},sql_insert:{},异常信息：{}'.format(film_name, sql_insert, str(ex)))
        return False
    finally:
        # Always release the cursor and connection
        cursor.close()
        connect.close()


# Write one episode's m3u8 playback URL to the database.
def insert_film_play_m3u8(film_id, film_play_url, film_play_line, film_episode, play_url_m3u8):
    """Insert one row into xcyy_film_play_m3u8.

    Skips the insert (and returns False) when a row with the same
    (film_id, play_url_m3u8) pair already exists. Returns True on a
    successful insert, False on duplicate or any database error.
    """
    # Open a dedicated connection/cursor for this call
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        now_time = datetime.datetime.now()
        # Duplicate check on (film_id, play_url_m3u8)
        sql_select = "SELECT * FROM xcyy_film_play_m3u8 WHERE film_id = %s AND play_url_m3u8 = %s"
        data_select = (film_id, play_url_m3u8)
        cursor.execute(sql_select, data_select)
        rows = cursor.fetchone()
        film_episode = '第{}集'.format(film_episode)
        if rows is not None:
            print("重复写入：{},{}".format(str(film_episode), play_url_m3u8))
            return False
        # Insert the new playback record, stamped with the current time
        sql_insert = "INSERT INTO xcyy_film_play_m3u8 (film_id,film_play_url,film_play_line,film_episode,play_url_m3u8,film_insert_time) VALUES (%s, %s, %s, %s, %s, %s)"
        data = (film_id, film_play_url, film_play_line, film_episode, play_url_m3u8, now_time)
        cursor.execute(sql_insert, data)
        connect.commit()
        return True
    except Exception as ex:
        # BUG FIX: the original logged the undefined name `film_name` here,
        # which raised a NameError inside the handler whenever a DB error
        # occurred; log the episode label (a local) instead.
        print('写入失败：{},sql_insert：{},异常信息：{}'.format(film_episode, sql_insert, str(ex)))
        return False
    finally:
        # Always release the cursor and connection
        cursor.close()
        connect.close()


# Skip data already collected within the last month. film_id: video id
def collect_time_by_one_month(film_id):
    """Return True when film_id was collected less than 31 days ago.

    Returns False when no record exists yet (never collected) or the record
    is older than 31 days. On any database error returns True, so the
    caller skips re-collecting rather than risking duplicates.
    """
    # Open a dedicated connection/cursor for this call
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        # Look up the existing record by film_id
        sql_select = "SELECT * FROM xcyy_film_info WHERE film_id = %s"
        data_select = film_id
        cursor.execute(sql_select, data_select)
        res = cursor.fetchone()
        if res is None:
            return False  # no row for this film_id: not collected yet
        # Map column names onto the row tuple for access by name
        result = dict(zip([k[0] for k in cursor.description], res))
        insert_time = result.get('film_insert_time')
        # BUG FIX: pymysql returns DATETIME columns as datetime objects, but
        # time.strptime() requires a string and raised TypeError on them; the
        # blanket except then reported every record as "collected this month".
        # Accept both datetime objects and string timestamps.
        if isinstance(insert_time, datetime.datetime):
            insert_timeStamp = int(insert_time.timestamp())
        else:
            timeStruct = time.strptime(str(insert_time), '%Y-%m-%d %H:%M:%S.%f')
            insert_timeStamp = int(time.mktime(timeStruct))
        now_timeStamp = int(time.time())  # current timestamp, in seconds
        diff_timeStamp = now_timeStamp - insert_timeStamp
        one_month = 31 * 24 * 60 * 60
        # True when the record is younger than one month
        return diff_timeStamp < one_month
    except Exception as ex:
        print('查询失败：{},sql_insert：{},异常信息：{}'.format(film_id, sql_insert, str(ex)))
        return True
    finally:
        # Always release the cursor and connection
        cursor.close()
        connect.close()


# Record a request/parsing failure in the error-log table.
def insert_error_log(film_id, film_name, film_play_url, error_message):
    """Insert one row into xcyy_error_log (best-effort; errors are printed,
    never raised, so logging failures cannot break the crawl loop)."""
    # Open a dedicated connection/cursor for this call
    connect = get_connect()
    cursor = get_cursor(connect)
    sql_insert = ''
    try:
        now_time = datetime.datetime.now()
        # Insert the error record, stamped with the current time
        sql_insert = "INSERT INTO xcyy_error_log(film_id,film_name,film_play_url,error_message,insert_time) VALUES (%s, %s, %s, %s, %s)"
        data = (film_id, film_name, film_play_url, error_message, now_time)
        cursor.execute(sql_insert, data)
        connect.commit()
    except Exception as ex:
        print('写入失败：{},sql_insert：{},异常信息：{}'.format(film_name, sql_insert, str(ex)))
    finally:
        # Always release the cursor and connection
        cursor.close()
        connect.close()


# Perform a GET request. is_text: False -> utf-8 decoded bytes, True -> response text (e.g. page source)
def get_request_result(url, param=None, is_text=False):
    """GET `url` (with optional raw query string `param`) and return the body.

    is_text=True returns `response.text` stripped; otherwise the raw bytes
    are decoded as UTF-8 and stripped. On any failure the literal string
    '请求异常' is returned (callers compare against it / let parsing fail).
    Sleeps 0.5-1.5s after each request to throttle the crawl.
    """
    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'
    }
    try:
        if param is not None:
            url = url + '?' + param
        # BUG FIX: a timeout is required — without one a stalled server
        # hangs the crawler thread forever. verify=False matches the
        # module-level InsecureRequestWarning suppression.
        response = requests.get(url=url, headers=header, verify=False, timeout=30)
        sleep_time = random.random() + 0.5
        time.sleep(sleep_time)  # throttle: block 0.5-1.5s before the next request
        if is_text:
            return response.text.strip()
        return response.content.decode('utf-8').strip()
    except Exception as e:
        print('请求url：{}，请求参数：{}，异常信息：{}'.format(url, param, str(e)))
        return '请求异常'


# Collect the href values of the category links on the site homepage.
def get_html_url():
    """Fetch the homepage and return the absolute category-link URLs.

    Extracts hrefs (and their h3 titles, printed for inspection) via a
    fixed xpath over the page structure.
    """
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'
    response = requests.get(url=host)
    html = response.text
    _lxml = etree.HTML(html)
    # Fixed-depth xpath matching the site's category menu markup
    xpath_href = _lxml.xpath('//div/div/div/div/div/a/@href')
    titles = _lxml.xpath('//div/div/div/div/div/h3/a/text()')
    # Relative hrefs -> absolute URLs (removed unused `headers` local,
    # replaced the append loop with a comprehension)
    hrefs = [host + i for i in xpath_href]
    print(hrefs + titles)
    return hrefs


# Generate all paginated listing URLs per category and save them to a queue.
# Page counts: {TV series: 854, movies: 1918, variety: 419, anime: 441}
# Page counts: {TV series: 858, movies: 1921, variety: 420, anime: 442}
def get_url_byType(_hrefs, type_queue):
    """For each category URL in _hrefs, enqueue its paginated listing URLs.

    The page number (1..max_page) is spliced in before the trailing
    '---.html' of the category URL. Only the TV-series ('dianshiju')
    category is currently enabled; unknown hrefs contribute no pages.
    """
    # Page count per enabled category (site snapshot; 0 pages otherwise).
    # To re-enable a category, add its entry here.
    page_counts = {
        'https://www.cu518.com/xingchen_show/dianshiju-----------.html': 858,
        # 'https://www.cu518.com/xingchen_show/dianying-----------.html': 1921,
        # 'https://www.cu518.com/xingchen_show/zongyi-----------.html': 420,
        # 'https://www.cu518.com/xingchen_show/dongman-----------.html': 442,
    }
    for href in _hrefs:
        # BUG FIX: max_page is now resolved per href. Previously it was set
        # once before the loop, so every href AFTER the matching one reused
        # the stale 858 and enqueued bogus URLs for unrelated categories.
        max_page = page_counts.get(href, 0)
        for i in range(0, max_page):
            length = len(href)
            prefix = href[0:length - 8]   # everything before the final '---.html'
            suffix = href[length - 8:length]  # the final '---.html'
            url = prefix + str(i + 1) + suffix
            type_queue.put(url)

# Fetch the video detail-page addresses and save them to a queue.
def get_url_detail(type_queue, detail_queue):
    """Consume listing-page URLs from type_queue; scrape each page for
    video detail links and put the absolute URLs on detail_queue.

    NOTE(review): the `while not empty()` loop is racy while the producer
    thread is still filling type_queue — it relies on the startup sleeps in
    __main__ to have items queued before this runs; confirm if refactoring.
    """
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'
    while not type_queue.empty():
        type_url = type_queue.get()
        type_queue.task_done()
        # response = requests.get(url=type_url).text
        response = get_request_result(url=type_url, is_text=True)
        time.sleep(0.1)

        _lxml = etree.HTML(response)
        # Relative hrefs of the detail page for each video on the listing
        detail_url = _lxml.xpath('//ul/li/div/a/@href')
        for i in detail_url:
            details = host + i
            print(details)
            detail_queue.put(details)

# Scrape each detail page: store film metadata, then every episode's m3u8 playback URL.
def insert_play_url(detail_queue):
    """Consume detail-page URLs from detail_queue and persist the data.

    For each detail page: parse film id/name/cast/intro/type/area/year,
    skip films collected within the last month, insert the film row, then
    follow every play link to extract and store its m3u8 URL. All failures
    are printed and written to the error-log table; the loop never stops.

    NOTE(review): the `while not empty()` loop is racy while the producer
    thread is still filling detail_queue — relies on the startup sleeps in
    __main__.
    """
    host = 'http://www.cu518.com'
    if host.startswith('http://'):
        host = 'https://www.cu518.com'
    while not detail_queue.empty():
        film_id = 0  # film id (pre-set so the except block can log it)
        film_name = ''  # film name (pre-set so the except block can log it)
        detail_url = ''  # film detail-page URL
        try:
            detail_url = detail_queue.get()
            detail_queue.task_done()
            # URL shape assumed: https://host/<seg>/<id>.html -> id from path segment 4
            temp_id = detail_url.split('/')[4]
            film_id = int(temp_id.split('.')[0])  # film id

            # response = requests.get(url=detail_url).text
            response = get_request_result(url=detail_url, is_text=True)
            time.sleep(0.1)

            _lxml = etree.HTML(response)
            film_name = _lxml.xpath('//div/h1/text()')  # film name (list; [0] taken below)
            film_protagonist = _lxml.xpath('//p/a/text()')  # main cast
            film_introduce = re.findall('<span class="detail-content" style="display: none;">(.*)</span>', response)  # synopsis
            film_img = _lxml.xpath('//div/a/img/@data-original')[0]  # cover image

            # Type / area / year — regex-scraped from labeled spans in the page
            temp_str = re.findall('</span>(.*)<span class="split-line">', response)
            type_str = re.findall('<span class="text-muted">(.*)<span class="split-line">', response)[0].replace('\t', '').replace('</span>', '')
            film_type = str(type_str).split('：')[1]
            area_str = re.findall('<span class="text-muted hidden-xs">(.*)<span class="split-line">', response)[0].replace('\t', '').replace('</span>', '')
            film_area = str(area_str).split('：')[1]
            year_str = re.findall('<span class="text-muted hidden-xs">(.*)', response)[1].replace('\t', '').replace('</span>', '')
            film_year = str(year_str).split('：')[1].replace('\r', '')

            film_name = film_name[0]
            # Join cast names with commas; the final slice trims the trailing
            # separator plus extra characters (NOTE(review): -6 looks tuned to
            # the site's markup rather than just the trailing ',' — confirm)
            protagonist = ''
            for pt in film_protagonist:
                protagonist += pt + ','
            protagonist = protagonist[0:len(protagonist) - 6]
            if len(film_introduce) == 0:
                film_introduce.append('暂无简介')
            film_introduce = film_introduce[0]

            # Store the film info, unless collected within the past month
            bool_one_month = collect_time_by_one_month(film_id)
            if bool_one_month:
                print('视频id：{}，视频名字：{}-->{}该资源一个月内采集过{}'.format(film_id, film_name, ('*' * 15), ('*' * 15)))
                continue  # collected within a month: skip to the next detail URL
            bool_info = insert_film_info(film_id, film_name, film_type, film_area, film_year, film_img, '电视剧', detail_url, protagonist, film_introduce)
            if bool_info:
                print('电视剧信息写入成功：{}'.format(film_name))
            else:
                print('电视剧信息写入失败：{}'.format(film_name))
                continue  # info insert failed: skip the episode scraping
            # Playback lines and per-episode play-page links
            play_line_cn = _lxml.xpath('//div/h3/text()')
            detail_play_url = _lxml.xpath('//div/ul/li/a/@href')
            count = 0
            for url in detail_play_url:
                m3u8_url = ''
                try:
                    # Only follow links that are actually play pages
                    flag = 'play' in url
                    if flag:
                        # Play URL shape assumed: /play/<id>-<line>-<episode>.html
                        temp = url.split('/')[2]
                        film_id = int(temp.split('-')[0])
                        play_line = int(temp.split('-')[1])
                        episode = str(temp.split('-')[2]).split('.')[0]  # episode number
                        m3u8_url = host + url

                        # Extract the .m3u8 URL from the play page's embedded player JSON
                        response_m3u8_temp = get_request_result(url=m3u8_url, is_text=True)
                        time.sleep(0.2)
                        # res = json.loads(re.findall('var player_x10d26=(.+);n', response.body.decode('utf-8'))[0])
                        # Append ';' after '}' so the terminator-anchored regex below matches
                        response_m3u8 = response_m3u8_temp.replace('}', '};')
                        result_temp = re.findall('var player_x10d26=(.*);', response_m3u8)
                        result = str(result_temp).replace('\'', '')
                        # print(result)
                        result_data = json.loads(result)
                        play_url_m3u8 = result_data[0].get('url').replace('\\', '')
                        play_url_id = result_data[0].get('id')
                        # print(play_url_m3u8)
                        # Persist the m3u8 record
                        bool_m3u8 = insert_film_play_m3u8(film_id, m3u8_url, play_line, episode, play_url_m3u8)
                        if bool_m3u8:
                            print('写入成功,电视剧名字：{},线路：{},集数：第{}集,地址：{}'.format(film_name, play_line, episode, play_url_m3u8))
                            count += 1
                        else:
                            print('写入失败,电视剧名字：{},线路：{},集数：第{}集,地址：{}'.format(film_name, play_line, episode, play_url_m3u8))
                except Exception as ex:
                    print('{}电视剧名字：{}，url：{}，异常信息：{}{}'.format(('*' * 15), film_name, m3u8_url, str(ex), ('*' * 15)))
                    insert_error_log(film_id, film_name, m3u8_url, str(ex))  # request/parse failure for one episode
                    continue
            print('电视剧名字：{},总行数：{}'.format(film_name, count))

        except Exception as e:
            print('{}电视剧名字：{}，detail_url：{}，异常信息：{}{}'.format(('*' * 15), film_name, detail_url, str(e), ('*' * 15)))
            insert_error_log(film_id, film_name, detail_url, str(e))  # request/parse failure for the detail page
            continue


if __name__ == '__main__':
    # Entry point: three-stage pipeline (category pages -> detail pages ->
    # m3u8 extraction), one thread per stage, connected by queues.
    print('采集电视剧start。。。')
    _hrefs = get_html_url()
    # Thread-safe FIFO queues between the pipeline stages
    # Stage 1 output: paginated category listing URLs
    type_url_queue = Queue(maxsize=50000)
    # Stage 2 output: video detail-page URLs
    detail_url_queue = Queue(maxsize=200000)

    # Thread 1: generate listing-page URLs
    t1 = threading.Thread(target=get_url_byType, args=(_hrefs, type_url_queue))
    t1.start()

    # Thread 2: scrape detail-page URLs.
    # The sleep lets t1 queue items first, since the consumer loop exits on
    # an empty queue.
    t2 = threading.Thread(target=get_url_detail, args=(type_url_queue, detail_url_queue))
    time.sleep(2)
    t2.start()

    # Thread 3: scrape and store the m3u8 playback URLs (same warm-up sleep)
    t3 = threading.Thread(target=insert_play_url, args=(detail_url_queue,))
    time.sleep(5)
    t3.start()

    # Wait for all stages to finish
    t1.join()
    t2.join()
    t3.join()

    print('采集电视剧end。。。')
