# _*_ coding:utf-8 _*_
import json
import time
from queue import Empty
from queue import Queue
from threading import Thread

import requests
from lxml import etree


class Movies():
    """Crawler for movie/TV metadata on www.lgyy.cc.

    Listing pages are fanned out to a pool of 20 daemon worker threads via
    a Queue; each worker fetches a listing page, then follows every movie
    card to its detail and play pages, accumulating one dict per movie in
    ``self.data_list``.
    """

    def __init__(self):
        self.data_list = []  # collected movie records (one dict per movie)
        self.timeout = 5  # per-request timeout, in seconds
        self.header = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
        }

    # Crawl using a worker pool fed from a queue.
    def thread_queue(self, movies_data_url):
        """Enqueue every listing page of every category and crawl them with
        20 worker threads; blocks until the queue is drained.

        :param movies_data_url: list of dicts with keys 'url' (template
            containing the literal substring 'page'), 'type' (category id)
            and 'page' (page count, as a string).
        """
        start = time.time()
        queue = Queue()
        # Enqueue one [type, url] task per listing page.
        # NOTE(review): range(1, page) stops one short of `page`, so the
        # last listing page is never fetched — confirm whether 'page' is
        # meant as an exclusive bound; preserved as-is.
        for i in movies_data_url:
            for j in range(1, int(i['page'])):
                url = i['url'].replace('page', str(j))
                args = [i['type'], url]
                queue.put(args)
        print('queue队列 开始大小 %d' % queue.qsize())
        # Start the worker pool.  The original slept 5 s after starting each
        # worker, which only serialized startup (~95 s wasted) and did not
        # space out the actual HTTP requests, so the sleep was dropped.
        for index in range(20):
            thread = Thread(target=self.get_movies, args=(queue,))
            thread.daemon = True  # workers die with the main thread
            thread.start()
        queue.join()  # returns once every task has been task_done()'d
        end = time.time()
        print('queue队列 结束大小 %d' % queue.qsize())
        print(f'电影数据爬取完成')
        print('总耗时：%s' % (end - start))

    def get_movies(self, queue):
        """Worker loop: take listing-page tasks ([type, url]) from `queue`
        and scrape the first 12 movie cards of each page."""
        while True:
            # get_nowait() avoids the check-then-act race of the original
            # `while not queue.empty(): queue.get()` pattern across threads.
            try:
                data = queue.get_nowait()
            except Empty:
                return
            try:
                get_html = requests.get(data[1], timeout=self.timeout, headers=self.header).text
            except Exception as e:
                print(str(e) + data[1])
                # BUG FIX: the original `break` skipped task_done() for this
                # task, so queue.join() in thread_queue() blocked forever
                # after any request error.  Mark the task done and move on.
                queue.task_done()
                continue
            # Parse the listing page with lxml.
            lxml_html = etree.HTML(get_html)
            result = lxml_html.xpath('/html/body/div[1]/div[3]/div/div/div[3]/div[1]/a')[:12]
            for i in result:
                data_dict = {}
                data_dict['type_id_1'] = data[0]
                # movie title
                data_dict['vod_name'] = i.xpath('@title')[0]
                # poster image
                data_dict['vod_pic'] = i.xpath('./div[1]/div[2]/img/@src')[0]
                last_html = 'https://www.lgyy.cc/' + i.xpath('@href')[0]
                try:
                    self.get_movies_details(last_html, data_dict)
                    # scrape timestamp
                    data_dict['vod_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    print(data_dict)
                    self.data_list.append(data_dict)
                except Exception as e:
                    print(e)
            queue.task_done()  # this listing page is fully processed

    # Movie detail page.
    def get_movies_details(self, url, data_dict):
        """Scrape year/area/update info and the full episode play-list from
        a movie's detail page into `data_dict`.  Raises on request/parse
        errors; the caller catches and logs."""
        html_text = requests.get(url, timeout=self.timeout, headers=self.header).text
        lxml_html = etree.HTML(html_text)
        # release year
        vod_year = lxml_html.xpath('/html/body/div[1]/div[3]/div/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/a/text()')
        data_dict['vod_year'] = vod_year[0] if vod_year else ''
        # region
        vod_area = lxml_html.xpath('/html/body/div[1]/div[3]/div/div[1]/div[1]/div[2]/div[1]/div[1]/div[2]/a/text()')
        data_dict['vod_area'] = vod_area[0] if vod_area else ''
        # last-updated date
        vod_pubdate = lxml_html.xpath(
            '/html/body/div[1]/div[3]/div/div[1]/div[1]/div[2]/div[2]/div[1]/div[4]/div/text()')
        data_dict['vod_pubdate'] = vod_pubdate[0] if vod_pubdate else ''
        # latest-episode note
        vod_remarks = lxml_html.xpath(
            '/html/body/div[1]/div[3]/div/div[1]/div[1]/div[2]/div[2]/div[1]/div[5]/div/text()')
        data_dict['vod_remarks'] = vod_remarks[0] if vod_remarks else ''

        # episode links
        movies_play = lxml_html.xpath('//*[@id="panel1"][1]/div/div/a')
        # shared metadata (actors, score, hits, ...) lives on the first
        # episode's play page
        movies_play_url = movies_play[0].xpath('./@href')[0]
        self.get_movies_play_details('https://www.lgyy.cc/' + movies_play_url, data_dict)
        # build "label$url#label$url#..." for every episode
        plays = []
        for i in movies_play:
            movies_play_url = 'https://www.lgyy.cc/' + i.xpath('./@href')[0]
            movies_play_text = i.xpath('./span/text()')[0]
            plays.append(self.get_movies_play(movies_play_url, movies_play_text))
        data_dict['vod_play_url'] = '#'.join(plays)

    # Play page: per-movie metadata.
    def get_movies_play_details(self, url, data_dict):
        """Scrape actor/director/class plus hit-count and score ajax data
        from an episode play page into `data_dict`."""
        html_text = requests.get(url, timeout=self.timeout, headers=self.header).text
        lxml_html = etree.HTML(html_text)
        script_text = lxml_html.xpath(
            "/html/body/div[1]/div[3]/div/div[1]/div/div[1]/div[1]/script[1]/text()")[0]
        # The script is "var ... = {json}".  Split on the FIRST '=' only:
        # the original split('=')[1] truncated payloads containing '='.
        json_data = json.loads(script_text.split('=', 1)[1])
        # actors
        data_dict['vod_actor'] = json_data['vod_data']['vod_actor']
        # director
        data_dict['vod_director'] = json_data['vod_data']['vod_director']
        # genre
        data_dict['vod_class'] = json_data['vod_data']['vod_class']
        data_dict['vod_pic_thumb'] = json_data['vod_pic_thumb']
        # site movie id
        data_dict['vod_id'] = json_data['id']
        # view counters (BUG FIX: the original URL began with a stray space;
        # also pass timeout/headers like every other request)
        update__json = requests.get(
            f'https://www.lgyy.cc/index.php/ajax/hits?mid=1&id={json_data["id"]}&type=update',
            timeout=self.timeout, headers=self.header).json()['data']
        data_dict['vod_hits'] = update__json['hits']
        data_dict['vod_hits_day'] = update__json['hits_day']
        data_dict['vod_hits_week'] = update__json['hits_week']
        data_dict['vod_hits_month'] = update__json['hits_month']
        # rating.  NOTE(review): this endpoint uses the lgyy.tv domain while
        # everything else uses lgyy.cc — confirm intentional; preserved.
        score_json = requests.get(
            f'https://www.lgyy.tv/index.php/ajax/score?mid=1&id={json_data["id"]}',
            timeout=self.timeout, headers=self.header).json()['data']
        data_dict['vod_score'] = score_json['score']
        data_dict['vod_score_num'] = score_json['score_num']
        data_dict['vod_score_all'] = score_json['score_all']

    # Play page: one episode's decoded stream link.
    def get_movies_play(self, url, movies_play_text):
        """Return 'episode_label$stream_url' for one episode play page."""
        html_text = requests.get(url, timeout=self.timeout, headers=self.header).text
        lxml_html = etree.HTML(html_text)
        script_text = lxml_html.xpath(
            "/html/body/div[1]/div[3]/div/div[1]/div/div[1]/div[1]/script[1]/text()")[0]
        # split on the first '=' only (see get_movies_play_details)
        json_data = json.loads(script_text.split('=', 1)[1])
        return movies_play_text + '$' + json_data['url']

# Persist one scraped record to MySQL.
import pymysql


def create_data(i):
    """Insert one movie record dict into movie_collect.movievod_languangyingyuan.

    Errors are printed and swallowed (best effort), matching the crawler's
    keep-going behaviour.

    :param i: record dict produced by Movies.get_movies.
    """
    # Parameterized query — the original interpolated raw scraped strings
    # with '%', which breaks on any quote in a title and is SQL-injectable.
    sql = ("insert into movie_collect.movievod_languangyingyuan("
           "type_id_1,vod_name,vod_pic,vod_year,vod_area,vod_pubdate,vod_remarks,"
           "vod_actor,vod_director,vod_class,vod_pic_thumb,vod_id,vod_hits,"
           "vod_hits_day,vod_hits_week,vod_hits_month,vod_score,vod_score_num,"
           "vod_score_all,vod_play_url,vod_time,vod_content,vod_down_url,"
           "vod_plot_name,vod_plot_detail) VALUES("
           "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    params = (i['type_id_1'], i['vod_name'], i['vod_pic'], i['vod_year'], i['vod_area'],
              i['vod_pubdate'], i['vod_remarks'], i['vod_actor'], i['vod_director'],
              i['vod_class'], i['vod_pic_thumb'], i['vod_id'], i['vod_hits'],
              i['vod_hits_day'], i['vod_hits_week'], i['vod_hits_month'], i['vod_score'],
              i['vod_score_num'], i['vod_score_all'], i['vod_play_url'],
              # BUG FIX: the original hard-coded '2022-09-21 12:00:00' even
              # though the crawler records the real scrape time in 'vod_time'.
              i.get('vod_time', '2022-09-21 12:00:00'),
              '', '', '', '')
    try:
        conn = pymysql.connect(host='localhost', user='root',
                               password='soloman', database='movie_collect', port=3306)
        try:
            with conn.cursor() as cursor:
                cursor.execute(sql, params)
            conn.commit()
        finally:
            conn.close()  # release the connection even when execute fails
    except Exception as e:
        print(e)
if __name__ == '__main__':
    # Category ids: 1 movies, 2 TV series, 3 variety shows, 5 anime,
    # 4 documentaries — paired with each category's page count.
    categories = [(1, '648'), (2, '343'), (3, '198'), (5, '315'), (4, '16')]
    movies_data_url = [
        {'url': f'https://www.lgyy.cc/vodshow/{cat}--------page---.html',
         'type': cat, 'page': pages}
        for cat, pages in categories
    ]
    movies = Movies()
    movies.thread_queue(movies_data_url)
    # Persist every record the crawler collected.
    for record in movies.data_list:
        create_data(record)

