import requests
import time
import random
import sys
import re
import os
import lxml.etree as etree
from urllib.parse import quote
from fake_useragent import UserAgent
import queue
import threading


class LiShiPing():
    """Scraper/downloader for pearvideo.com.

    Workflow: collect video ids from the homepage (hot videos) or the
    site search pages, resolve each id to a playable media URL via the
    ``videoStatus.jsp`` endpoint, then download the files with a small
    thread pool.

    NOTE(review): the hard-coded cookie values below were captured from
    one browser session and will expire; confirm whether the site still
    serves anonymous requests without them.
    """

    def __init__(self, url) -> None:
        """
        Args:
            url (str): site base URL, e.g. "https://www.pearvideo.com/".
        """
        self.search_queue = queue.Queue()    # (href_id, title) tuples filled by search()
        self.download_queue = queue.Queue()  # (video_url, file_name) tuples filled by download()
        self.url = url
        self.ua = UserAgent().random  # one random User-Agent reused for every request
        self.session = requests.Session()
        self.cookies = {
            'PEAR_UUID': '8bce84b5-61df-47c6-9ba9-76b2d5c6949b',
            '_uab_collina': '174425391778254502410625',
            'p_h5_u': '54738951-6379-4EA5-BDF7-209C11A5F9B9',
            'Hm_lvt_9707bc8d5f6bba210e7218b8496f076a': '1744253919',
            'HMACCOUNT': '3C82ECCD2294926D',
            'JSESSIONID': '06DEB4271556207A96E635359D06B946',
            'Hm_lpvt_9707bc8d5f6bba210e7218b8496f076a': '1744274655',
        }
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,zh-TW;q=0.5',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Referer': 'https://cn.bing.com/',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'cross-site',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': self.ua,
            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
        }
        print(
            "欢迎使用梨视频下载器\n"
            f"\n"
            "请选择功能:\n"
            " 1.视频热点下载\n"
            " 2.视频搜索下载\n"
            " 3.退出\n"
            f"{'-'*20}"
        )

    def parameter(self, referer_url, href_id=None):
        """Build the headers and query params for a videoStatus.jsp call.

        Args:
            referer_url (str): page URL used as the Referer base.
            href_id (str, optional): relative video href such as
                "video_1234567"; its numeric suffix becomes contId.

        Returns:
            tuple[dict, dict]: (headers, params) ready for requests.get.
        """
        if href_id:
            referer = referer_url + href_id
            cont_id = href_id.split('_')[-1]  # trailing digits are the content id
        else:
            referer = quote(referer_url)
            cont_id = None
        headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Referer': referer,
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': self.ua,
            'X-Requested-With': 'XMLHttpRequest',
            'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
        }
        params = {
            'contId': cont_id,
            # Cache-busting value mimicking the site's own "mrd" param: a
            # random float rounded to at most 16 decimal places.  (The
            # original comment claimed a "16-digit" number, which round()
            # does not guarantee.)
            'mrd': round(random.random(), 16),
        }
        return headers, params

    def homepage(self):
        """Yield (href_id, src_url, title) for every hot video on the homepage."""
        try:
            response = self.session.get(self.url, cookies=self.cookies, headers=self.headers)
            if response.status_code != 200:
                print(f"请求失败，状态码: {response.status_code}")
                return
            html = etree.HTML(response.text)
            for block in html.xpath('//div[contains(@class, "ver-act-block pd040")]'):
                for link in block.xpath('.//a[contains(@href, "video_")]'):
                    href_id = link.get('href')
                    # Title lives in one of three layout-dependent divs.
                    title = link.xpath('.//div[@class="actwapslide-title"]/text() | .//div[@class="vervideo-name"]/text() | .//div[@class="vervideo-title"]/text()')[0]
                    headers, params = self.parameter(self.url, href_id)
                    status = self.session.get('https://www.pearvideo.com/videoStatus.jsp', params=params, cookies=self.cookies, headers=headers).json()
                    yield href_id, status['videoInfo']['videos']['srcUrl'], title
        except Exception as e:
            print(f"请求失败: {e}")

    def search(self, key):
        """Search the whole site for `key` and queue every hit.

        Puts (href_id, title) tuples on self.search_queue.

        Returns:
            int: total result count reported by the site; 0 on failure
            or when no results are found.
        """
        try:
            first_url = f"https://www.pearvideo.com/search.jsp?start=0&k={key}"
            headers = self.parameter(self.url)[0]
            session = requests.Session()  # dedicated session for the paging loop
            response = session.get(first_url, cookies=self.cookies, headers=headers)
            if response.status_code != 200:
                print(f"搜索请求失败，状态码: {response.status_code}")
                return 0
            html = etree.HTML(response.text)
            page_index = html.xpath('//div[@class="search-result-num"]/span/text()')
            if not page_index:
                # The original fell through and raised IndexError on the
                # empty list; treat a missing counter as zero results.
                return 0
            total = int(page_index[0])
            print("搜索结果:", page_index[0])
            # Referer headers for the page requests are loop-invariant: hoisted.
            page_headers = self.parameter(first_url)[0]
            page = 0
            while True:
                if page == 0:
                    page_url = f"https://www.pearvideo.com/search.jsp?start={page}&k={key}"
                else:
                    page_url = f"https://www.pearvideo.com/search_loading.jsp?start={page}&k={key}&sort="
                response_list = session.get(page_url, cookies=self.cookies, headers=page_headers)
                html_list = etree.HTML(response_list.text)
                for item in html_list.xpath('//li[@class="result-list"]/div[2]/a[contains(@href, "video_")]'):
                    self.search_queue.put((item.get('href'), item.xpath('.//h2/text()')[0]))
                if page >= total:
                    break
                page += 10  # the site pages results in steps of 10
                time.sleep(random.uniform(0.5, 3))  # polite jitter between pages
            return total
        except Exception as e:
            print(f"搜索失败: {e}")
            return 0

    def fetch_video_info(self, video_id, video_title):
        """Resolve one video id to (video_id, src_url, video_title).

        Returns:
            tuple | None: None when the request fails or errors.
        """
        try:
            # Single parameter() call; the original called it twice and
            # discarded the first result.
            headers, params = self.parameter(self.url, video_id)
            session = requests.Session()  # independent Session per call (thread safety)
            response = session.get('https://www.pearvideo.com/videoStatus.jsp', params=params, cookies=self.cookies, headers=headers)
            if response.status_code == 200:
                return (video_id, response.json()['videoInfo']['videos']['srcUrl'], video_title)
            print(f"获取视频 {video_title} 信息失败，状态码: {response.status_code}")
            return None
        except Exception as e:
            print(f"获取视频 {video_title} 信息失败: {e}")
            return None
        finally:
            time.sleep(random.uniform(0.5, 2))  # jitter to avoid hammering the site

    def search_worker(self):
        """Drain self.search_queue with 5 threads; return the resolved video list."""
        print('正在获取视频链接')
        video_info_list = []
        lock = threading.Lock()  # guards appends from the worker threads

        def process_queue():
            # Worker loop: exits once the queue has been empty for 1 s.
            while True:
                try:
                    video_id, video_title = self.search_queue.get(timeout=1)
                except queue.Empty:
                    break
                try:
                    result = self.fetch_video_info(video_id, video_title)
                    if result:
                        with lock:
                            video_info_list.append(result)
                except Exception as e:
                    # video_title is always bound here (get() succeeded),
                    # unlike the original handler, which could NameError.
                    print(f"处理视频 {video_title} 时出错: {e}")
                finally:
                    self.search_queue.task_done()  # exactly once per item

        threads = [threading.Thread(target=process_queue) for _ in range(5)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return video_info_list

    def download_worker(self, path_name):
        """Download items from self.download_queue until it stays empty for 1 s.

        Args:
            path_name (str): output directory, created on demand.
        """
        while True:
            try:
                video_url, file_name = self.download_queue.get(timeout=1)
            except queue.Empty:
                break
            try:
                print(f"开始下载: {file_name}", end='\r')
                # NOTE(review): self.session is shared by all download
                # threads; requests.Session is not documented as fully
                # thread-safe — confirm or switch to per-thread sessions.
                video = self.session.get(video_url, cookies=self.cookies, headers=self.headers)
                if video.status_code == 200:
                    # exist_ok avoids the check-then-create race between
                    # the 5 concurrent workers in the original code.
                    os.makedirs(path_name, exist_ok=True)
                    with open(f"{path_name}/{file_name}.mp4", 'wb') as f:
                        f.write(video.content)
                    print(f"下载完成: {file_name}")
                else:
                    print(f"下载失败 {file_name}, 状态码: {video.status_code}")
            except Exception as e:
                print(f"下载 {file_name} 时出错: {e}")
            finally:
                # Exactly one task_done() per get(): the original could
                # call it twice for one item (success path + handler),
                # which breaks download_queue.join() accounting.
                self.download_queue.task_done()
                time.sleep(random.uniform(0.5, 2))

    def download(self, video_info_list=None, path_name="downloads"):
        """Queue every video and download with a pool of 5 threads.

        Args:
            video_info_list (iterable, optional): (video_id, src_url, title)
                tuples, e.g. from homepage(); when omitted or falsy, results
                are gathered from self.search_queue via search_worker().
            path_name (str): output directory for the .mp4 files.
        """
        if not video_info_list:
            video_info_list = self.search_worker()
            if not video_info_list:
                print("无视频数据可下载。")
                return

        for video_id, src_url, title in video_info_list:
            # The srcUrl returned by the API contains a timestamp segment
            # that must be replaced by "cont-<id>" to get a working URL.
            cont_id = 'cont-' + video_id.split('_')[-1]
            video_url = re.sub(r'(?<=/)\d+(?=-)', cont_id, src_url)
            # Replace characters that are invalid in (Windows) file names.
            file_name = re.sub(r'[\\/*？?:。"<>|]', ',', title)
            self.download_queue.put((video_url, file_name))

        print('视频链接获取完成,准备下载。')
        threads = []
        for _ in range(5):
            t = threading.Thread(target=self.download_worker, args=(path_name,))
            t.start()
            threads.append(t)

        # Wait for every queued item to be marked done, then for the
        # workers themselves to exit.
        self.download_queue.join()
        for t in threads:
            t.join()

def main():
    """Interactive entry point: prompt for a mode, run it once, then exit."""
    downloader = LiShiPing("https://www.pearvideo.com/")
    done = False
    while not done:
        try:
            choice = int(input("请输入功能编号: "))
            if choice == 1:
                # Hot-video mode: scrape the homepage listing and download.
                downloader.download(downloader.homepage(), '热门视频')
                done = True
            elif choice == 2:
                # Search mode: only download when the search found something.
                keyword = input("请输入搜索关键词: ")
                if downloader.search(keyword) > 0:
                    downloader.download(path_name=keyword)
                done = True
            elif choice == 3:
                done = True
            else:
                print("请输入有效的功能编号（1-3）。")
        except ValueError:
            # Non-numeric input: re-prompt.
            print("请输入数字编号。")
        except Exception as e:
            print(f"发生错误: {e}")
            time.sleep(2)
    print("任务完成，程序退出。")
    sys.exit()

if __name__ == '__main__':
    main()