from concurrent.futures import ThreadPoolExecutor, as_completed
import json
from concurrent.futures import ThreadPoolExecutor
import time
import requests
import queue
import threading
import pandas as pd
from cookie_get_manual import cookie_get_manual

# Accumulator for scraped rows: one 12-field list per job posting.
# Appended to from worker threads in add_all_list (CPython list.append is atomic).
all_list = []
# Desktop Edge/Chrome User-Agent string sent with every request.
User_Agent = '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'''
# Session cookie; rebound at module level in __main__ after each manual refresh,
# and read (as a global) by fetch_url when it builds request headers.
cookie=''
def add_all_list(page, data, headers):
    """Parse one search-result page and append job rows to the global ``all_list``.

    For every entry in ``data['zpData']['jobList']`` this extracts the summary
    fields, fetches the job-detail endpoint for the address / company intro /
    description, and appends a 12-element row to ``all_list``.

    Args:
        page: Page identifier, used only in error log messages.
        data: Decoded JSON payload from the job-list endpoint.
        headers: HTTP headers (User-Agent / Cookie) for the detail request.
    """
    global all_list
    # Iterate the actual job list instead of a hard-coded range(30): pages with
    # fewer than 30 jobs no longer raise, and a single bad entry no longer
    # aborts the remaining entries of the page (the old single try/except
    # returned on the first failure, silently dropping the rest).
    job_list = data.get('zpData', {}).get('jobList') or []
    for i, job in enumerate(job_list):
        try:
            company_brief_address = (
                job['cityName'] + job['areaDistrict'] + job['businessDistrict']
            )
            # Build a clean detail URL (the old triple-quoted f-string embedded
            # newlines and indentation into the URL).
            detail_url = (
                'https://www.zhipin.com/wapi/zpgeek/job/detail.json'
                f'?securityId={job["securityId"]}&lid={job["lid"]}'
            )
            resq2 = requests.get(detail_url, headers=headers)
            data2 = resq2.json()
            job_info = data2['zpData']['jobInfo']
            brand_info = data2['zpData']['brandComInfo']
            job_description = (
                job_info['postDescription']
                + '\n岗位要求：' + str(job_info['showSkills'])
                + '\n岗位福利：' + str(brand_info['labels'])
            )
            # Row layout must match list_headers in __main__ (12 columns).
            all_list.append([
                company_brief_address,        # company_brief_address
                job_info['address'],          # company_detailed_address
                job['bossName'],              # hr_name
                job['jobLabels'],             # job_tags
                job['jobName'],               # job_title
                job['salaryDesc'],            # job_salary
                job['brandName'],             # company_name
                brand_info['introduce'],      # company_intro
                job['brandStageName'],        # company_status
                job['brandScaleName'],        # company_size
                job['brandIndustry'],         # company_type
                job_description,
            ])
        except (KeyError, IndexError, TypeError, ValueError,
                requests.RequestException):
            # Missing/partial data for this entry: log and continue with the
            # next one (original log message preserved).
            print(f"page为{page}的第{i}个报错，没有此数据")

def fetch_url(url, headers, ban_url: queue.Queue):
    """Fetch one job-list page and hand the payload to ``add_all_list``.

    Args:
        url: Job-list API URL to fetch.
        headers: Ignored — headers are rebuilt below from the module-level
            ``cookie`` so that a cookie refreshed between batches takes effect
            without re-submitting tasks. Parameter kept for caller
            compatibility.
        ban_url: Unused; kept for signature compatibility with callers.

    Raises:
        Exception: when the API returns code 37 (anti-scraping triggered).
    """
    # Rebuild the headers from the (possibly refreshed) global cookie,
    # stripping whitespace that a pasted cookies.txt may contain.
    request_headers = {
        'User-Agent': User_Agent,
        'Cookie': cookie.replace('\n', '').replace('\r', '').replace(' ', ''),
    }
    resq = requests.get(url, headers=request_headers)
    data = resq.json()
    if data['code'] == 37:
        # Anti-scraping response from the server; surface it to the caller so
        # the batch loop can requeue this batch and refresh the cookie.
        print('触发反爬')
        raise Exception("爬取发生错误")
    elif data['code'] == 0:
        print("连接成功")
        add_all_list('test', data, request_headers)
    # Any other code is silently ignored (matches the original behavior).


if __name__ == '__main__':

    urls = []
    error_list=[]
    # Nanjing preset (uncomment csv_name/city/areaBusiness to scrape it instead)
    # csv_name='nanjing'
    # city = 101190100
    # areaBusiness = [320102, 320111, 320104, 320105, 320106, 320116, 320117, 320118, 320113, 320114, 320115]
    # Nanchang preset
    # csv_name = 'nanchang'
    # city=101240100
    # areaBusiness = [360103,360102,360111,360104,360113,360112,360124,360121,360123]
    csv_name='shanghai'
    city = 101020100
    areaBusiness = [310151,310101,310109,310110,310104,310105,310106,310107,310116,310117,310118,310112,310113,310114,310115,310120]
    position = 100101
    # Build one search URL per (district, page) pair: 10 pages x 30 jobs each.
    for Business in areaBusiness:
        for page in range(1, 11):
            recommend_url1 = f'''https://www.zhipin.com/wapi/zpgeek/search/joblist.json?scene=1&query=&city={city}&experience=&payType=&partTime=&degree=&industry=&scale=&stage=&position={position}&jobType=&salary=&multiBusinessDistrict={Business}&multiSubway=&page={page}&pageSize=30'''
            urls.append(recommend_url1)
    ban_url = queue.Queue()
    # NOTE(review): presumably performs a manual login flow and writes
    # cookies.txt as a side effect — confirm against cookie_get_manual.
    cookie_get_manual()
    # Process 4 URLs per batch
    urls_per_batch = 4
    # Thread pool sized to the batch so each batch runs fully in parallel
    executor = ThreadPoolExecutor(max_workers=urls_per_batch)
    # Load the freshly written cookie file
    with open('cookies.txt', 'r') as file:
        # Read the whole cookie string
        cookie = file.read()
    # NOTE(review): fetch_url ignores these headers and rebuilds them from the
    # global `cookie`; this dict is effectively only a placeholder argument.
    headers = {
        'User-Agent': User_Agent,
        'Cookie': cookie.replace('\n', '').replace('\r', '').replace(' ', '')
    }
    count=0
    # Main pass: loop until every URL has been processed
    print(f"需要爬{len(urls) / urls_per_batch}轮")
    while urls:
        count=count+1
        # Take the next batch of URLs
        print(f"还需要爬{len(urls)/urls_per_batch}轮")
        next_batch = urls[:urls_per_batch]
        urls = urls[urls_per_batch:]

        # Submit the batch to the thread pool
        futures = [executor.submit(fetch_url, url, headers, ban_url) for url in next_batch]

        # Wait for this batch to finish
        for future in as_completed(futures):
            try:
                data = future.result()
                # print(data)
            except Exception as exc:
                # On any single failure the WHOLE batch is requeued for the
                # retry pass (so successfully scraped URLs may be re-scraped).
                for error_url in next_batch:
                    error_list.append(error_url)
                print(f"一个任务发生异常: {exc}")
                # cookie=cookie_get_manual()
        # Anti-scraping countermeasure: refresh the cookie after every batch.
        # NOTE(review): assumes cookie_get_manual returns the new cookie string
        # (earlier its return value was discarded) — confirm.
        print(f"{count}轮完成，进行反爬")
        cookie=cookie_get_manual()


    # After the main pass, dump everything scraped so far to CSV.
    # Column order must match the rows built in add_all_list.
    list_headers = ['company_brief_address', 'company_detailed_address', 'hr_name',
                    'job_tags', 'job_title', 'job_salary', 'company_name', 'company_intro',
                    'company_status', 'company_size', 'company_type', 'job_description']
    # Build the DataFrame
    df = pd.DataFrame(all_list, columns=list_headers)
    # Save as CSV without the index column
    df.to_csv(f'{csv_name}.csv', index=False, encoding='utf-8')
    print("错误的url有:", error_list)

    count = 0
    # Retry pass: loop until the error list is drained
    while error_list:
        # De-duplicate requeued URLs (set() does not preserve order)
        error_list = list(set(error_list))

        count = count + 1
        # Take the next batch of URLs
        print(f"需要爬{len(error_list) / urls_per_batch}轮")
        next_batch = error_list[:urls_per_batch]
        error_list = error_list[urls_per_batch:]

        # Submit the batch to the thread pool
        futures = [executor.submit(fetch_url, url, headers, ban_url) for url in next_batch]

        # Wait for this batch to finish
        for future in as_completed(futures):
            try:
                data = future.result()
                # print(data)
            except Exception as exc:
                # Requeue the whole batch again. NOTE(review): a URL that
                # always fails keeps this loop running indefinitely.
                for error_url in next_batch:
                    error_list.append(error_url)
                print(f"一个任务发生异常: {exc}")
                print(f"当前的错误列表长度: {len(error_list)}")
                # cookie=cookie_get_manual()
        # Anti-scraping countermeasure: refresh the cookie after every batch
        print(f"{count}轮完成，进行反爬")
        cookie = cookie_get_manual()

    # Shut down the thread pool
    executor.shutdown()
    # Dump again after the retry pass. all_list keeps growing across both
    # passes, so this "_error" file also contains all rows from the main pass.
    list_headers = ['company_brief_address', 'company_detailed_address', 'hr_name',
                    'job_tags', 'job_title', 'job_salary', 'company_name', 'company_intro',
                    'company_status', 'company_size', 'company_type', 'job_description']
    # Build the DataFrame
    df = pd.DataFrame(all_list, columns=list_headers)
    # Save as CSV without the index column
    df.to_csv(f'{csv_name}_error.csv', index=False, encoding='utf-8')
