# Code:
import csv
import os
import random
import time
from threading import Thread
import logging
import pandas as pd
import requests
from django import setup

# Bootstrap Django so this standalone script can use the project's ORM.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '招聘平台岗位数据分析推荐系统.settings')
setup()
from myApp.models import Jobinfo  # must be imported only after the two Django setup steps above


# Append a single scraped record to the CSV data file.
def save_to_csv(rowData):
    """Append one row (a list of field values) to zpData.csv."""
    with open('zpData.csv', mode='a', encoding='utf-8', newline='') as fp:
        csv.writer(fp).writerow(rowData)


def writer_row(writer):
    """Write the CSV header row naming every scraped job field."""
    header = [
        'title', 'address', 'type', 'educational', 'workExperience',
        'workTag', 'salary', 'salaryMonth', 'companyTags', 'hrWork',
        'hrName', 'practice', 'companyTitle', 'companyAvatar',
        'companyNature', 'companyStatus', 'companyPeople', 'detailUrl',
        'companyUrl', 'dist',
    ]
    writer.writerow(header)


# CSV file initialisation.
def init():
    """Ensure zpData.csv exists and starts with the header row.

    The original duplicated the identical open/write logic across two
    branches (file missing vs. file present but empty); both cases are
    merged into a single condition here.
    """
    if not os.path.exists('zpData.csv') or os.path.getsize('zpData.csv') == 0:
        with open('zpData.csv', 'a', newline='', encoding='utf-8') as wf:
            writer_row(csv.writer(wf))


# Persist the cleaned CSV rows into the MySQL database via the Django ORM.
# NOTE(review): this function was commented out in the original file, yet
# spider() and the __main__ block still call sava_to_db(), which raised
# NameError at runtime — the implementation is restored here.  The original
# "sava" spelling is kept because every caller uses it.
def sava_to_db():
    """Clean zpData.csv, insert each row as a Jobinfo record, then delete
    and re-initialise the CSV file."""
    data, shape = clear_csv()

    print("正在将数据写入数据库")
    for job in data:
        try:
            print(job)
            # Field order matches the CSV header written by writer_row().
            Jobinfo.objects.create(
                title=job[0],
                address=job[1],
                type=job[2],
                educational=job[3],
                workExperience=job[4],
                workTag=job[5],
                salary=job[6],
                salaryMonth=job[7],
                companyTags=job[8],
                hrWork=job[9],
                hrName=job[10],
                practice=job[11],
                companyTitle=job[12],
                companyAvatar=job[13],
                companyNature=job[14],
                companyStatus=job[15],
                companyPeople=job[16],
                detailUrl=job[17],
                companyUrl=job[18],
                dist=job[19]
            )
        except Exception as e:
            # A single bad row must not abort the whole import.
            logging.error(f'{e}')
            continue
    print('总数据为:%d:' % shape)
    print("已将全部数据写入数据库")
    os.remove('zpData.csv')
    init()  # recreate the CSV with just the header row


def get_zp_stoken(seed, ts):
    """Ask the local sekiro RPC bridge for a fresh __zp_stoken__ value.

    Parameters:
        seed: the `seed` value returned by zhipin.com's anti-bot challenge.
        ts:   the matching `ts` (timestamp) value.

    Returns:
        The token string, or None when the bridge is unreachable or its
        reply lacks a 'zp_stoken' field.
    """
    data = {
        "group": "boss_rpc",
        "action": "boss",
        "seed": seed,
        "ts": ts
    }
    try:
        # timeout added: a dead bridge now fails fast instead of hanging forever
        res = requests.get("http://127.0.0.1:5612/business-demo/invoke",
                           params=data, timeout=10).json()
        return res["zp_stoken"]
    except Exception as e:
        # Was a bare `except:` that also swallowed KeyboardInterrupt and
        # discarded the error; log it and return an explicit None.
        logging.error(f'{e}')
        print("请打开sekiro和boss官网链接ws")
        return None


def H(zp_stoken, seed, ts, sname):
    """Build the HTTP headers (browser identity + anti-bot cookies) used
    for every request to zhipin.com."""
    cookie = f'__zp_stoken__={zp_stoken};__zp_sseed__={seed}; __zp_sname__={sname}; __zp_sts__={ts};'
    return {
        'authority': 'www.zhipin.com',
        'referer': 'https://www.zhipin.com',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
        'cookie': cookie,
    }


def testSpider(url, seed, ts, flag):
    """Probe `url` once and, when the anti-bot page comes back, refresh the
    __zp_stoken__ cookie material through the local RPC bridge.

    Returns (zp_stoken, seed, ts, sname) on success; otherwise falls off the
    end and returns None implicitly — NOTE(review): the caller in spider()
    tuple-unpacks the result, so a None return raises TypeError there.
    """
    time.sleep(1)
    url1 = url
    try:
        if flag:
            # NOTE(review): `proxies` is not defined anywhere in this file —
            # presumably a module-level global set elsewhere; verify.
            response = requests.get(url1, headers=H("", seed, ts, ""), proxies=proxies)
        else:
            response = requests.get(url1, headers=H("", seed, ts, ""))
        print(response.json())
        if "您的 IP 存在异常访问行为，暂时被禁止访问!" in str(response.json()):
            print("ip被封啦，快去手动解封")
            # save_to_csv()
            pass
        # NOTE(review): str.find() returns -1 (truthy) when absent and 0
        # (falsy) when found at index 0, so this condition is almost always
        # True; the intent was probably `"..." in response.text`.
        if response.text.find("您的访问行为异常") or response.text.find("您的账户存在异常行为."):
            while 1:
                try:
                    seed = response.json()['zpData']['seed']
                    ts = response.json()['zpData']['ts']
                    sname = response.json()['zpData']['name']
                    t = get_zp_stoken(seed, ts)
                    print("rpc远程刷新zpstoken为:" + t)
                    if t and seed and ts and sname:
                        return t, seed, ts, sname
                except:
                    # NOTE(review): bare except + continue retries forever
                    # when the response carries no zpData — unbounded loop.
                    continue
    except Exception as e:
        # print('等待中...')
        logging.error(f'{e}')


# Spider: crawls Boss Zhipin job listings for every keyword in j_List.
def spider(j_List, flag):
    """Crawl up to 10 result pages per search keyword and save rows to CSV.

    Parameters:
        j_List: list of search-keyword strings (e.g. 'Java', 'python实习生').
        flag:   when True, requests go through the module-level `proxies`
                mapping — NOTE(review): `proxies` is not defined in this
                file; presumably set elsewhere, verify.

    Side effects: appends one CSV row per job via save_to_csv() and calls
    sava_to_db() when a keyword finishes or fails.
    """
    # Anti-bot token state, refreshed by testSpider() after failures.
    seed = ""
    ts = ""
    zp_stoken = ''
    sname = ''
    # NOTE(review): the loop variable `type` shadows the builtin `type`.
    for type in j_List:
        try:
            page = 1
            # zp_stoken, seed, ts, sname = testSpider(url, seed, ts, flag)
            while page <= 10:
                url = f'https://www.zhipin.com/wapi/zpgeek/search/joblist.json?scene=1&query={type}&city=100010000&page={page}&pageSize=30'
                try:
                    print(f"========================正在爬取{type}岗位信息================================")
                    if flag:
                        response = requests.get(url, headers=H(zp_stoken, seed, ts, sname), proxies=proxies)
                    else:
                        response = requests.get(url, headers=H(zp_stoken, seed, ts, sname))
                    if "您的账户存在异常行为." in str(response.json()):
                        # NOTE(review): `page` is not advanced here, so a
                        # banned account retries the same page indefinitely.
                        continue
                        # print(response.json())
                    print(f"========================正在爬取第{page}页数据===================================")
                    time.sleep(0.1)
                    job_list = response.json()["zpData"]["jobList"]
                    if len(job_list) == 0:
                        # NOTE(review): same issue — an empty page loops forever.
                        continue
                    else:
                        print(f"本页有{len(job_list)}条数据待采集")
                        for job in job_list:
                            Practice = 0  # 0 = regular position (default)
                            jobData = []
                            title = job['jobName']
                            address = job['cityName']
                            type = type
                            educational = job['jobDegree']
                            workExperience = job['jobExperience']
                            workTag = job['skills']
                            salary = []
                            sa = job['salaryDesc']
                            salaries = sa.split('·')
                            s = salaries[0].split('-')
                            # '15K·13薪' -> month part '13薪'; no '·' -> '0薪'
                            salaryMonth = '0薪' if len(salaries) == 1 else sa.split('·')[1]
                            if 'K' in sa:
                                # salary quoted in K (thousand yuan / month)
                                if '实习' in title:
                                    if len(s) != 1:
                                        #  e.g. 7-8K/month
                                        salary = list(
                                            map(lambda x: int(x) * 1000,
                                                salaries[0].replace('K', '').split('-')))  # range -> [low, high] in yuan
                                else:
                                    # NOTE(review): a range such as '10-20K' raises
                                    # ValueError ('invalid literal for int') here;
                                    # it is caught by the outer except below.
                                    salary = int(salaries[0].replace('K', '')) * 1000  # single value, e.g. '8K'
                            elif '元/天' in sa:
                                #  internship quoted as a daily wage
                                if len(s) != 1:
                                    #  e.g. 150-200 yuan/day
                                    salary = list(
                                        map(lambda x: int(x), salaries[0].replace('元/天', '').split('-')))  # range -> [low, high]
                                else:
                                    salary = salaries[0].replace('元/天', '')  # single daily wage, kept as a string
                            companyTags = job['welfareList']
                            if len(companyTags) == 0:
                                companyTags = '无'
                            hrWork = job['bossTitle']
                            hrName = job['bossName']
                            companyTitle = job['brandName']
                            companyAvatar = job['brandLogo']
                            companyNature = job['brandIndustry']
                            companyStatus = job['brandStageName']
                            People = job['brandScaleName']  # e.g. '20-99人'
                            companyPeople = list(
                                map(lambda x: int(x),
                                    People.replace('人', '').split('-'))) if People != '10000人以上' else [0,
                                                                                                       10000]  # conditional expression
                            # https://www.zhipin.com/job_detail/3d67396c96d2d35a1HV72du7GVFY.html?lid=2pqQGnc8DID.search.1&securityId=K2RVIYiFR19pq-e1x-mYeSQP4mFVMxcoUPhaH_j085YlCMPGxh7r6FNOTouFybcP5es87f-cfkCBKDZWs7jbEvzqpKNcAqnKk6JCfhbZao5mHrcsaMxKCJNaKYcgmbBdzfwaiar64Is~&sessionId=
                            lid = job['lid']
                            securityId = job['securityId']
                            encryptJobId = job['encryptJobId']
                            detailUrl = f'https://www.zhipin.com/job_detail/{encryptJobId}.html?lid={lid}.search.1&securityId={securityId}&sessionId='

                            l = job['encryptBrandId']
                            companyUrl = f'https://www.zhipin.com/gongsi/{l}'
                            dist = job['areaDistrict']
                            # Skip incomplete records.
                            if not salary or not hrName or not companyTitle or not companyUrl:
                                continue
                            # print(f"salary[1]:{salary[1]}")
                            if '实习' in title:
                                Practice = 1  # internship
                                # NOTE(review): `not type.find("实习生")` is True only
                                # when type STARTS WITH '实习生' (find() == 0); the
                                # intent was probably `'实习生' not in type`.
                                if not type.find("实习生"):
                                    type = type + "实习生"
                            else:
                                # NOTE(review): truthy for find() == -1 (absent) as
                                # well as any position > 0 — likely inverted logic.
                                if type.find("实习生"):
                                    if isinstance(salary, int):
                                        # NOTE(review): no-op guard — an int salary
                                        # still reaches salary[1] below and raises
                                        # TypeError (caught by the outer except).
                                        pass
                                    if int(salary[1]) > 8000:
                                        type = type.replace("实习生", '')
                                        # treat as a regular position
                                        Practice = 0
                                    else:
                                        Practice = 1
                                else:
                                    Practice = 0
                            # print("类型:" + type, f"实习:{Practice}", f"薪资:{salary}")
                            print(title, f"实习:{Practice}", f"薪资:{salary}", address, type, educational, workExperience,
                                  workTag, salaryMonth,
                                  companyTags, hrWork, hrName, companyTitle, companyAvatar,
                                  companyNature, companyStatus, companyPeople, detailUrl, companyUrl, dist)

                            jobData.append(title)
                            jobData.append(address)
                            jobData.append(type)
                            jobData.append(educational)
                            jobData.append(workExperience)
                            jobData.append(workTag)
                            jobData.append(salary)
                            jobData.append(salaryMonth)
                            jobData.append(companyTags)
                            jobData.append(hrWork)
                            jobData.append(hrName)
                            jobData.append(Practice)
                            jobData.append(companyTitle)
                            jobData.append(companyAvatar)
                            jobData.append(companyNature)
                            jobData.append(companyStatus)
                            jobData.append(companyPeople)
                            jobData.append(detailUrl)
                            jobData.append(companyUrl)
                            jobData.append(dist)
                            save_to_csv(jobData)  # append the row to zpData.csv
                            time.sleep(0.1)
                        page += 1
                except Exception as e:
                    # print("error:", e)
                    logging.error(f'{e}')
                    print('等待中')
                    # A salary-parse failure just skips this page.
                    if 'invalid literal for int() with base 10:' in str(e):
                        page += 1
                        continue
                    if flag:
                        time.sleep(random.randint(1, 3))
                    else:
                        time.sleep(random.randint(5, 6))
                    # Refresh the anti-bot cookie material via the RPC bridge.
                    zp_stoken, seed, ts, sname = testSpider(url, seed, ts, flag)
                finally:
                    # NOTE(review): `continue` inside finally silently swallows
                    # any exception raised in the except handler just above.
                    continue
        except:
            # NOTE(review): bare except; sava_to_db() is commented out above,
            # so this call raises NameError unless that function is restored.
            sava_to_db()
            continue
    sava_to_db()  # clean the CSV and flush it into the database


# Data cleaning with pandas.
def clear_csv():
    """Clean zpData.csv and return (rows, row_count).

    Cleaning steps: drop NaN rows and duplicates, strip the '薪' suffix
    from salaryMonth, and keep only rows whose salary contains digits.

    Returns:
        (numpy row array, number of rows); on any failure returns ([], 0)
        instead of None so that `data, shape = clear_csv()` callers do not
        crash with an unpack TypeError (the original returned None here).
    """
    try:
        df = pd.read_csv('zpData.csv')
        df.dropna(inplace=True)  # drop rows with any missing value
        df.drop_duplicates(inplace=True)  # drop duplicate rows
        # Drop rows where the key columns are NaN.
        df.dropna(subset=['hrName', 'salary', 'companyTitle', 'companyUrl'], inplace=True)
        # Normalise the "N薪" (N-month-salary) column to the bare number.
        df['salaryMonth'] = df['salaryMonth'].map(lambda x: str(x).replace('薪', ''))
        # astype(str) guards against pandas inferring a numeric dtype, which
        # would make the .str accessor raise; expand=False yields a Series.
        df['salary'] = df['salary'].astype(str).str.extract(r'(\d+)', expand=False)
        # Drop rows whose salary had no digits (extract gave NaN) before the
        # isdigit mask — NaN in a boolean mask would raise.
        df = df[df['salary'].notna() & df['salary'].str.isdigit()]
        return df.values, df.shape[0]
    except Exception as e:
        logging.error(f'{e}')
        return [], 0


# Program entry point.
if __name__ == '__main__':
    try:
        # sava_to_db()
        flag = False  # True -> route all requests through the IP proxy
        d = input("是否使用ip代理(默认不使用 使用请输入y):")
        if d == 'y' or d == 'Y':
            flag = True

        n = input("是否开启多线程爬虫(y/n):")
        if n == 'y' or n == 'Y':
            # Keyword lists split between two worker threads.
            j_list2 = ['大数据实习生', 'python实习生', 'Java实习生', '网络安全实习生', '爬虫实习生', '小程序实习生', '前端开发实习生', '后端开发实习生']
            j_list1 = ['全栈工程师', 'C/C++', '后端开发', '机器学习', 'Java', '嵌入式', '前端开发', '网络安全', '爬虫', '运维测试', '大数据', '小程序']
            # BUG FIX: Thread's `args` must be a tuple of the target's
            # arguments.  The original passed args=j_list1, which unpacked
            # every keyword as a separate positional argument and omitted
            # `flag`, so spider() crashed with a TypeError on thread start.
            t1 = Thread(target=spider, args=(j_list1, flag))
            t2 = Thread(target=spider, args=(j_list2, flag))
            t1.start()
            t2.start()
            print("爬虫线程1启动")
            print("爬虫线程2启动")
        if n == 'n' or n == 'N':
            j_list = ['Java', '大数据', 'C/C++', '前端开发', '网络安全', '机器学习', '嵌入式', '全栈工程师', '后端开发', '爬虫', '运维测试', '小程序',
                      'python实习生',
                      'Java实习生', '大数据实习生', '网络安全实习生', '爬虫实习生', '小程序实习生', '前端开发实习生', '后端开发实习生']
            spider(j_list, flag)  # start single-threaded data collection
    except Exception as e:
        logging.error(f'{e}')
        print('等待中...')
        # Flush whatever was scraped before the crash.
        sava_to_db()
