import requests,base64
import re,csv

# Notes:
# execjs, base64 decoding,
# csv output, bytes -> str conversion

# Recruitment sites: heavy anti-scraping measures
# Shopping sites: relatively new data structures
# GitHub / "Jack Ma" (Alibaba) properties: newer sites to collect from
# Candidate tooling: requests, scrapy, bs4




# The target site applies two layers of obfuscation: both the /page/[token]/
# path segment and the postId=[token] query value are base64-encoded.
# Spoofed request headers so the scraper looks like a regular browser.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
}

# Accumulates one dict per scraped job posting; written to CSV at the end.
parse_dates = []

# URL format: "/page/" + [base64 token] + "/12/" + cityCode
# City codes: 749 Changsha, 530 Beijing, 538 Shanghai, 763 Guangzhou,
# 765 Shenzhen, 653 Hangzhou
citys = [749,530,538,763,765,653]


# Format each scraped record and append it to the CSV output file.
def print_information(dates, filename="test.csv"):
    """Append one CSV row per job record in *dates* to *filename*.

    Each record is a dict produced by ``parse_url``.  Every field is
    stripped of all whitespace and of zero-width / replacement characters
    before writing, matching the original cleanup behavior.

    :param dates: iterable of job-record dicts.
    :param filename: output CSV path (default kept backward-compatible).
    """
    fields = ("job_name", "salary", "city", "job_minage", "edu",
              "company_name", "company_scale", "well_being",
              "pub_time", "company_require", "company_site")
    # BUG FIX: the previous hand-rolled comma join produced corrupt CSV
    # whenever a field contained a comma; csv.writer quotes properly.
    # newline='' is required so csv.writer controls line endings itself.
    with open(filename, "a+", encoding="utf-8", newline="") as fp:
        writer = csv.writer(fp)
        for record in dates:
            row = ["".join(str(record[f]).split())
                     .replace("\u200b", "").replace("\ufffd", "")
                   for f in fields]
            writer.writerow(row)


# Fields collected per posting: job title, salary, city, years of experience,
# education requirement, company name, company size, benefits, publish date,
# job requirements, and work location.
def parse_url(url):
    """Fetch one listing page, follow each posting's detail page, and
    append one dict per posting to the module-level ``parse_dates`` list.

    :param url: full listing-page URL (already base64-tokenized).
    """
    # Per-posting fields that only exist on the detail pages.
    well_beings = []
    company_requires = []
    company_sites = []

    # Fetch and parse the listing page.
    parse_text = requests.get(url=url, headers=headers).text

    # Job title
    job_name = re.findall(r'<a id="\d+".*?><span>(.+?)</span> </a>', parse_text)

    # City
    city = re.findall(r'<div class="layui-col-md1" align="right"><span>(.*?)</span>', parse_text)

    # Required years of experience
    job_minage = re.findall(r'<div class="layui-col-md1" align="right">((?:不限|无经验|.*?年|))</div>', parse_text)

    # Education requirement
    edu = re.findall(r'<div class="layui-col-md1" align="right">((?:本科|大专|硕士|博士|不限))</div>', parse_text)

    # Company name
    company_name = re.findall(r'<div class="layui-col-md6" align="right">.+?<a href=".+?">(.+?)</a>.+?</div>', parse_text, re.S)

    # Company size
    company_scale = re.findall(r'<div class="layui-col-md2" align="right"><span>公司规模：(.*?)</span></div>', parse_text)

    # Publish date
    pub_time = re.findall(r'<span class="dtime">(.*?)</span>', parse_text)

    # Salary
    salary = re.findall(r'<div class="layui-col-md1" align="right">((?:薪资面议|.*K))</div>', parse_text)

    # Detail pages are addressed by a base64-encoded posting id.
    # (The old `page_btn` switch was dead code -- it was always True at the
    # point it was checked -- and has been removed.)
    page_id = re.findall(r'<a id="(\d+)".*?><span>.+?</span> </a>', parse_text)
    for raw_id in page_id:
        # BUG FIX: the previous str()/replace() round-trip stripped every
        # literal 'b' from the token (not just the bytes-literal prefix),
        # corrupting many base64 payloads.  Decode the bytes properly.
        token = base64.b64encode(raw_id.encode()).decode('ascii')
        page_url = 'http://localhost:8080/getPosition?id=' + token
        page_url_text = requests.get(url=page_url, headers=headers).text

        # Benefits / requirements / location live in the 2nd-4th
        # "layui-card-body" blocks of the detail page.  Run the regex once
        # instead of three times.
        bodies = re.findall(r'<div class="layui-card-body">(.*?)</div>', page_url_text)

        # BUG FIX: the old `== []` checks compared a *string* against a
        # list (never true), and a short match list raised IndexError.
        # Benefits (commas replaced so they don't collide with CSV commas).
        if len(bodies) > 1 and bodies[1]:
            well_beings.append(bodies[1].replace(",", "、"))
        else:
            well_beings.append("无福利")

        # Job requirements
        if len(bodies) > 2 and bodies[2]:
            company_requires.append(bodies[2])
        else:
            company_requires.append("无要求")

        # Work location
        if len(bodies) > 3 and bodies[3]:
            company_sites.append(bodies[3])
        else:
            company_sites.append("工作地点空")

    page_date = None
    for i in range(0, len(job_name)):
        page_date = {
            "job_name": job_name[i],
            "salary": salary[i],
            "city": city[i],
            "job_minage": job_minage[i],
            "edu": edu[i],
            "company_name": company_name[i],
            "company_scale": company_scale[i],
            "well_being": well_beings[i],
            "pub_time": pub_time[i],
            "company_require": company_requires[i],
            "company_site": company_sites[i]
        }
        parse_dates.append(page_date)
    # BUG FIX: the old unconditional print raised NameError when a page
    # yielded no jobs; only echo the last record when one exists.
    if page_date is not None:
        print(page_date)



def encode_page_token(page):
    """Return the base64 paging token for page number *page*.

    The site expects base64("<page>-alice") in the /page/ URL segment.
    BUG FIX: the old str()/replace() round-trip stripped every literal
    'b' from the token (the encoding of "-alice" regularly contains one),
    corrupting the URL for many pages; decoding the bytes directly
    preserves the payload.
    """
    return base64.b64encode((str(page) + '-alice').encode()).decode('ascii')


if __name__ == "__main__":
    # Crawl every city, 100 listing pages each (1500 in the full run).
    for city_code in citys:
        # for page in range(1, 1501):
        for page in range(1, 101):
            token = encode_page_token(page)
            # NOTE(review): the extra "==" suffix reproduces the original
            # URL shape; presumably the server tolerates/strips surplus
            # padding -- confirm against the target service.
            url = "http://localhost:8080/page/" + token + "==/12/" + str(city_code)
            parse_url(url)

    # Scraping finished; flush everything that was collected to CSV.
    print_information(parse_dates)





