#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：pachong 
@File    ：huatupy.py
@IDE     ：PyCharm 
@Author  ：11
@Date    ：2024/10/18 14:14
'''
from lxml import etree
from datetime import datetime
import queue
import random
import threading
import openpyxl
import time
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
import os


# Candidate HTTP proxies for rotation.
# NOTE(review): this pool does not appear to be used anywhere in this file —
# requests.get() is never called with a ``proxies=`` argument; confirm before
# relying on it or deleting it.
proxy_pool = [
    # {'type': 'http', 'address': '47.122.65.254:8080'},
    # {'type': 'http', 'address': '8.130.34.44:8800'},
    {'type': 'http', 'address': '47.121.183.107:8443'},
    {'type': 'http', 'address': '8.130.36.245:8002'},
    {'type': 'http', 'address': '47.122.62.83:80'},

]
# Work queue of page indices; filled by the main block, drained by test() workers.
q = queue.Queue()
# Accumulated rows of scraped fields; each scrape appends one list per page.
data = []
# Scratch list of cleaned strings, rebound by the scraper functions.
cleaned_strings = []
# Placeholder for a session cookie; unused in this file as far as visible.
Cookie = ''


def remove_whitespace(s):
    """Return *s* with every whitespace character (spaces, tabs, newlines) removed."""
    # Substituting each whitespace char individually yields the same result
    # as collapsing whole runs at once.
    return re.sub(r"\s", "", s)


def test():
    """Worker loop: pull page indices from the shared queue ``q``, scrape one
    job-posting page per index from zw.huatu.com, and append the cleaned
    fields as one row to the module-level ``data`` list.

    Runs until a ``None`` sentinel is received from the queue. URLs that fail
    are appended to ``error.txt`` for the retry pass (see ``again_test``).
    """
    headers = {
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 QuarkPC/1.8.3.129',
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
      'Accept-Encoding': 'gzip',
      'Host': 'zw.huatu.com',
      'Connection': 'keep-alive',
      'Cookie': 'uuid=uuid-7dcbdb8d-d6ec-1241-5521-01a86b5b59d6; u3_fujian=%5B%7B%22title%22%3A%22%E5%8D%8E%E5%9B%BE%E5%9C%A8%E7%BA%BF-%E5%85%AC%E8%81%8C%E6%95%99%E8%82%B2%E7%BD%91%E7%BB%9C%E5%AD%A6%E4%B9%A0%E5%B9%B3%E5%8F%B0%22%2C%22url%22%3A%22https%3A%2F%2Fv.huatu.com%2F%22%7D%5D; tfstk=g7FjOjMcInxbou94p1Qrd0scxVl_Cl1EfFgT-Pd2WjhvB8Z0RZAZuPn_fuoufAbmbcn_bPeVz65Uorci6lsFT6ul7LFafq3t6_QtJVOAYrJfxoli6MSy-8VFkfqnc-eTMzQS7VRvMlHxebno-VhtDAKJe2n-XfEtHbU-5Vi9DC39eagi2cht6xwwRV6jmr_uI-CEM1xsN0O967g-jDUfXCu7aqtjArmSrqVWDxiLk0OOx5odPciu9gW7r8eY04q1OgG4LyPipSKRuqeQ7WMEe3J7jjg_-bVdYdkzCrFtF8fpfq2Tu5wTzw9n-8E0iRGBWsUSGmMKoY1p1kHtRycu9MJZURaY6YwlvpoLZzeUCoSk6myQyuMUa3REgJzUsvFvcguJTD9KjdTSKCgSYa_WId0jgJhN0AEnvx3olY75PneiH40SYa_WIdDxrqMPPatYI; acw_tc=0bdd34b217291349201903282ec54057b47947a55acd4d79cabb3227107d46; PHPSESSID=bm7ih8vef8gff34tf42g68jc32; code=v630; backurl=https://zw.huatu.com//zhiwei2015/465715.html; login_source=https://zw.huatu.com//zhiwei2015/465715.html; randcode=2667',
    }
    while True:
        i = q.get()
        if i is None:
            # Sentinel posted by the main thread: shut this worker down.
            break
        # Page ids are sequential; 465715 is the id of the first record.
        num = i + 465715
        url_page = f"http://zw.huatu.com//zhiwei2015/{num}.html"
        print(f'第{i+1}条数据获取中。。。')
        print(url_page)
        try:
            # timeout keeps a stalled connection from hanging this worker forever.
            req_page = requests.get(url_page, headers=headers, timeout=15)
            # Force UTF-8 decoding of the response body.
            req_page.encoding = 'utf-8'
            content = req_page.text
            if req_page.status_code != 200:
                print(url_page + "error!")
                print(req_page.status_code)
                # finally still runs, so the queue item is marked done.
                continue
            soup = BeautifulSoup(content, 'html.parser')
            # Re-parse with lxml so we can use an absolute XPath below.
            dom = etree.HTML(str(soup))
            # First field comes from a fixed position in the page body ...
            result = dom.xpath('/html/body/div[4]/p/text()[2]')
            # ... the remaining 28 fields are the first table cells.
            for cell in soup.find_all('td', limit=28):
                result.append(cell.get_text())
            # Build the row in a LOCAL list: the previous code wrote through a
            # shared module-level global, which was a data race with 30
            # concurrent workers and could mix fields from different pages.
            row = [remove_whitespace(s) for s in result]
            data.append(row)  # list.append is atomic in CPython
            print(f'第{i+1}条解析成功！\n')
        except Exception as e:
            # Record the failed URL so again_test() can retry it later.
            print(f"{i+1}条错误信息：" + f'{e}')
            with open('error.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url_page}\n')
        finally:
            # Always ack the queue item so q.join() in the main block can return.
            q.task_done()


# 对错误的网站进行重新爬取
def again_test():
    global cleaned_strings
    with open('doc.txt', 'r', encoding='utf-8') as file:
        lines = file.readlines()
    for line in lines:
        url = line.strip()  # 去除行尾的换行符
        if url == '':
            break
        try:
            # 发送HTTP请求获取网页内容
            response = requests.get(url)
            response.raise_for_status()  # 如果请求失败，抛出异常
            # 设置编码格式为 utf-8
            response.encoding = 'utf-8'
            # 处理响应内容
            content = response.text
            soup = BeautifulSoup(content, 'html.parser')
            # 将 BeautifulSoup 对象转换为 lxml 的 etree 对象
            dom = etree.HTML(str(soup))
            # 使用 lxml 的 xpath 方法获取指定路径的内容
            result = dom.xpath('/html/body/div[4]/p/text()[2]')
            content = soup.find_all('td', limit=28)
            for ct in content:
                result.append(ct.get_text())
            cleaned_strings = [remove_whitespace(s) for s in result]
            data.append(cleaned_strings)
            print(f'第{line + 1}行解析成功！\n')
        except requests.RequestException as e:
            print(f'请求失败：{url}', e)
            with open('error2.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url}\n')
    return data


if __name__ == '__main__':
    # Remember when the crawl kicked off so the duration can be reported.
    begin_time = datetime.now()
    str_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(f'开始爬取时间：{str_time}')
    # 20810 records in total; first page id 465715, last 486524.
    # number_of_jobs is how many pages this run will fetch.
    number_of_jobs = 100
    # Spin up the worker pool; each thread runs test() until it sees a sentinel.
    threads = [threading.Thread(target=test) for _ in range(30)]
    for worker in threads:
        worker.start()
    # Enqueue one job per page index.
    for job in range(number_of_jobs):
        q.put(job)

    # Block until every queued page has been acknowledged via task_done().
    q.join()
    # One None per worker tells each thread to leave its loop.
    for _ in range(30):
        q.put(None)  # shutdown sentinel

    for worker in threads:
        worker.join()
    # Timestamp in the filename records how fresh the scraped data is.
    now = datetime.now()
    formatted_now = now.strftime('%Y年%m月%d日%H时%M分%S秒')
    # Assemble the output path and dump everything the workers collected.
    file_name = f'D:\\A_project\\pachong\\result\\国考岗位信息（{formatted_now}）.xlsx'

    pd.DataFrame(data).to_excel(file_name, index=False, header=False)
    print(f'共耗时{round((now - begin_time).total_seconds(),2)}秒')
    print(f'生成完毕，请查看error文件或运行提示，检查是否有失败页面！！！')

# to_excl(threads)

