import re
import requests
import lxml.etree as etree
from fake_useragent import UserAgent
import time
import sys
import random
import threading
import os
import json

class RequestData():
    """
    Scrape resume-template data from sc.chinaz.com.

    1. Locate the hot-recommendation template categories
    2. Extract the template entries of each category page
    3. Extract the preview-image address of every template
    4. Build the actual .rar download address from the image address
    """

    def __init__(self, url):
        # url: listing page to start from (e.g. https://sc.chinaz.com/jianli/)
        self.url = url
        self.ua = UserAgent()
        # Cookies captured from a real browser visit; sent with every request.
        self.cookies = {
            'toolbox_urls': 'www.prometa.cn',
            'qHistory': 'aHR0cDovL3Nlby5jaGluYXouY29tX1NFT+e7vOWQiOafpeivonxodHRwOi8vdG9vbC5jaGluYXouY29tX+ermemVv+W3peWFtw==',
            'Hm_lvt_ca96c3507ee04e182fb6d097cb2a1a4c': '1743643260,1744077736,1744164082,1745549425',
            'Hm_lvt_398913ed58c9e7dfe9695953fb7b6799': '1745550869,1745743757,1745807533',
            'HMACCOUNT': '3C82ECCD2294926D',
            '_clck': '4fr228%7C2%7Cfvg%7C0%7C1919',
            'Hm_lpvt_398913ed58c9e7dfe9695953fb7b6799': '1745808115',
            '_clsk': '16dvm7x%7C1745808115900%7C2%7C1%7Ca.clarity.ms%2Fcollect',
        }

        # Browser-like request headers; a random user-agent is picked per run.
        self.headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'cache-control': 'no-cache',
            'priority': 'u=0, i',
            'referer': 'https://sc.chinaz.com/',
            'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': self.ua.random
        }

    def serch_index(self):
        """Locate the hot template categories.

        Returns:
            list of (index, name, url) tuples, or None on request failure.
        """
        try:
            session = requests.Session()
            # BUG FIX: the original passed self.headers for BOTH cookies= and
            # headers=, so the cookie jar above was never sent.
            response = session.get(self.url, cookies=self.cookies,
                                   headers=self.headers, timeout=10)
            response.encoding = response.apparent_encoding
            if response.status_code != 200:
                print(f'请求失败：{response.url}\t状态：{response.status_code}')
                return None
            html = etree.HTML(response.text)
            html_element = html.xpath('//div[@class="jl_navbar_right fl"]/ul/li')
            print(f"{'':>3} | 热门简历模板\n"
                f"{'-'*30}"
                )
            result = []
            try:
                for index, li in enumerate(html_element, start=1):
                    html_name = li.xpath('./a/text()')[0]
                    html_url = li.xpath('./a/@href')[0]
                    print(f"{index:>3} | {html_name}")
                    result.append((index, html_name, html_url))
                return result
            except Exception as e:
                print(f'解析模板失败,请稍后重试：{e}')
                print(f'当前页面：{response.url}\t{response.status_code}')
                # BUG FIX: the original embedded time.sleep(2) inside the
                # f-string, which printed a stray "None"; sleep separately.
                time.sleep(2)
                print('程序即将退出')
                sys.exit()
        except Exception as e:
            print(f'请求失败{e}')
            return None

    def get_data(self):
        """Ask the user to pick a category, then scrape every page of it.

        Returns:
            list of (category_name, page_url, img_src, img_alt) tuples,
            or None when no category was resolved.
        """
        index_data = self.serch_index()
        if not index_data:
            return None
        print()
        index_url = input('请输入模板索引：')
        for item in index_data:
            if index_url != str(item[0]):
                continue
            # Output directory named after the category.
            if not os.path.exists(item[1]):
                os.makedirs(item[1])
            index_url = item[2]
            print(f'  模板名称：{item[1]}')
            print(f'  模板链接：{index_url}')
            page = 1
            result = []
            base_url = index_url.rsplit('.', 1)[0]
            # One session for all pages; the original rebuilt the session and
            # re-applied identical headers/cookies on every iteration.
            session = requests.Session()
            session.headers.update(self.headers)
            session.cookies.update(self.cookies)  # apply the cookie jar
            session.headers['referer'] = self.url
            session.headers['Cache-Control'] = 'no-cache'
            while True:
                # Pages after the first follow the "<base>_<n>.html" pattern;
                # the _t query parameter is a cache-buster.
                if page > 1:
                    index_url = f"{base_url}_{page}.html?_t={int(time.time())}"
                else:
                    index_url = index_url + f"?_t={int(time.time())}"
                response = session.get(index_url, timeout=10)
                response.encoding = response.apparent_encoding
                page += 1
                if response.status_code != 200:
                    print(f'请求失败：{index_url}\t状态：{response.status_code}')
                    break
                print(f'  正在请求：{index_url}',end='\r')
                html = etree.HTML(response.text)
                data_link = html.xpath('//div[contains(@class,"box col3 ws_block")]/a/img')
                if not data_link:
                    print('未找到元素，终止循环')
                    break
                for a in data_link:
                    src_html = a.xpath('./@src')[0]
                    alt_html = a.xpath('./@alt')[0]
                    result.append((item[1], index_url, src_html, alt_html))
                # Politeness delay before requesting the next page.
                time.sleep(random.randint(0,3))
                # No "next page" link means this was the last page.
                if not html.xpath('//div/a[@class="nextpage"]'):
                    print(f'  请求完成：{index_url}')
                    break
            return result

    def download_url(self):
        """Build the real download links from the scraped preview-image links.

        Example: src="//scpic.chinaz.net/Files/pic/jianli/202304/zjianli1464_s.jpg"
        -> extract "202304/zjianli1464"
        -> https://downsc.chinaz.net/Files/DownLoad/jianli/202304/zjianli1464.rar

        Returns:
            list of (category_name, page_url, alt, download_url) tuples; the
            same list is also written to <category>/<category>.json.
            Returns [] on failure.
        """
        src_url = self.get_data()
        if not src_url:
            print(f'获取模板下载链接失败：{src_url}')
            return []
        result = []
        # Matches "<6 digits>/<everything up to the first underscore>",
        # e.g. "202304/zjianli1464" out of ".../202304/zjianli1464_s.jpg".
        pattern = re.compile(r'(\d{6})/([^_]+)')
        for title_name, index_url, src, alt in src_url:
            match = pattern.search(src)
            if match:
                new_url = f'https://downsc.chinaz.net/Files/DownLoad/jianli/{match.group(0)}.rar'
                result.append((title_name, index_url, alt, new_url))
        # BUG FIX: the original counted every scraped entry, including those
        # whose download URL could not be built; report the usable count.
        print(f'  可下载 {len(result)} 条数据\n')
        title_name = src_url[0][0]  # all entries share the selected category name
        filename = os.path.join(title_name, f'{title_name}.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=4)  # indent pretty-prints the output
        return result

class DownLoad:
    """
    File downloading.

    1. Single-file download function
    2. Multi-threaded dispatch over all items
    """
    def __init__(self, result_data, request_data):
        # result_data: list of (title_name, index_url, alt, download_url) tuples
        self.data = result_data
        # RequestData instance supplying browser headers and cookies
        self.request_data = request_data
        # Shared failure log. download_single runs concurrently, so access is
        # guarded by a lock.
        self._errors = []
        self._errors_lock = threading.Lock()

    def download_single(self, item):
        """Download one file into <title>/<alt>.rar; record failures."""
        title_name, index_url, alt, new_url = item
        filepath = os.path.dirname(__file__)
        filename = os.path.join(filepath, title_name, f'{alt}.rar')
        if os.path.exists(filename):
            print(f'  文件已存在：{alt}  \t{new_url}')
            return
        print(f'  正在下载：{alt}  \t{new_url}', end='\r')
        try:
            session = requests.Session()
            session.headers.update(self.request_data.headers)
            session.cookies.update(self.request_data.cookies)  # apply cookie jar
            # Pretend we navigated from the category page.
            session.headers['referer'] = index_url
            response = session.get(new_url,
                                stream=True,
                                timeout=10)
            if response.status_code == 200:
                with open(filename, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=1024):
                        f.write(chunk)
                print(f'  下载完成：{alt}  \t{new_url}')
            else:
                print(f'下载失败：{new_url}  \t状态：{response.status_code}')
        except Exception as e:
            print(f'下载失败：{new_url}  \t错误：{e}')
            # BUG FIX: the original created a fresh one-item list per failure
            # and opened the error file with 'w' in each thread, so concurrent
            # failures overwrote each other and only the last one survived.
            # Accumulate all failures under a lock and rewrite the full list.
            with self._errors_lock:
                self._errors.append((alt, new_url))
                error_file = os.path.join(filepath, title_name, f'{title_name}_error.json')
                with open(error_file, 'w', encoding='utf-8') as f:
                    json.dump(self._errors, f, ensure_ascii=False)

    def download(self):
        """Start one thread per item and wait for all of them to finish."""
        threads = []
        for item in self.data:
            thread = threading.Thread(target=self.download_single, args=(item,))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()
        

def main():
    """Entry point: scrape the download links, then fetch the files concurrently."""
    start_url = 'https://sc.chinaz.com/jianli/'
    scraper = RequestData(start_url)

    links = scraper.download_url()
    if not links:
        print('没有获取到有效数据，程序退出')
        return

    downloader = DownLoad(links, scraper)
    downloader.download()

    print('\n所有下载任务已完成')
    input('按任意键退出')
    sys.exit()


# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()