from job import getJob, FlowDone
from data import DataSource
import requests
import logging
import json
import time
import random
from conf import Conf
import csv
import os



# Base endpoint of zhipin.com's job-search API. The empty query-string fields
# are intentional placeholders; city/industry/position/page are supplied per
# request through `params` in searchJobList(). pageSize=30 is the page size.
jobList_url = "https://www.zhipin.com/wapi/zpgeek/search/joblist.json?scene=1&query=&experience=&payType=&partTime=&degree=&industry=&scale=&stage=&jobType=&salary=&multiBusinessDistrict=&multiSubway=&pageSize=30"



def searchJobList(city, industry, posiiton, page) -> dict:
  """Fetch one page of job listings from the zhipin job-list API.

  Args:
    city: city code understood by the API.
    industry: industry code.
    posiiton: position code. (sic — the misspelling is preserved because it
      is also the literal query-parameter name sent to the server;
      NOTE(review): confirm the API does not actually expect "position".)
    page: 1-based page number.

  Returns:
    The decoded JSON response as a dict.
  """
  params = {
    "city": city,
    "industry": industry,
    "posiiton": posiiton,
    "page": page,
  }
  # Rotate the wt2 anti-bot cookie: pick one value at random from the
  # configured pool (assumes conf stores a sequence of wt2 tokens — TODO confirm).
  ck = dict(cookies)
  ck["wt2"] = random.choice(ck["wt2"])
  logging.info("使用wt2:{}发送请求".format(ck["wt2"][:8]))
  # timeout added so a stalled connection cannot hang the crawler forever
  res = requests.get(jobList_url, headers=headers, cookies=ck, params=params,
                     timeout=30)
  return res.json()


def extractPosition(response :dict):
  jobList = response["zpData"]["jobList"]
  lst = []
  for job in jobList:
    jobName = job["jobName"]
    ctx = currentInfo.split("-")
    industry = ctx[1]
    positionCate = ctx[2]
    positionType = ctx[3]
    brandName = job["brandName"]
  
    cityName = job["cityName"]
    salary = job["salaryDesc"]
    degree = job["jobDegree"]
    
    exp = job["jobExperience"]
    brandScale = job["brandScaleName"]+";"+job["brandStageName"]

    skills = ";".join(job["skills"])

    lst.append([jobName,salary,brandName,positionType, positionCate, degree,exp,skills,cityName,industry,brandScale])
    
  return lst

  

  

def update_stoken(token, time):
  """Ask the local jsRPC service to compute a fresh stoken.

  Args:
    token: the "seed" value from a code-37 zhipin response.
    time: the "ts" value from the same response. NOTE(review): this
      parameter shadows the imported `time` module; harmless here because
      the module is never used inside this body, but the name is kept only
      for call-site compatibility.

  Returns:
    The jsRPC service's JSON reply as a dict (callers expect keys
    "status" and "data").
  """
  param = {
    "group": "zzz",
    "action": "stoken",
    "param": json.dumps({
        "token": token,
        "time": time
    }, ensure_ascii=False)
  }
  # timeout so a dead local service fails fast instead of hanging the crawler
  res = requests.get("http://localhost:12080/go", params=param, timeout=10)
  return res.json()
  
  
  
def syncJob():
  """Persist crawl progress (the global `job` dict) to the ".job" file.

  No-op while `job` is still None, so the finally-block in __main__ is safe
  even when run() failed before getJob() populated it.
  """
  if job is not None:  # identity test for None, not `!= None`
    with open(".job", "w", encoding="utf8") as tf:
      tf.write(json.dumps(job, ensure_ascii=False))
      logging.info("爬虫进度已保存")
      
    

def zhipinHandler(res :dict, detailFlag :dict):
      
  with open("positions.csv","a+", encoding="utf8") as f:
    match res["code"]:
      case 37:
        logging.warning(res["message"])
        logging.warning("访问jsRPC获取新token")
        token  = res["zpData"]["seed"]
        ts     = res["zpData"]["ts"]
        jsRpc  = update_stoken(token, ts)
        
        if jsRpc["status"] != 200:
          raise RuntimeError(f"jsRpc服务出错!终止爬虫: {jsRpc}")
        
        stoken = jsRpc["data"]
        logging.info(f"新的stoken: {stoken}")
        Conf.setStoken(stoken)
                
      case 0:
        logging.info("页面爬取成功")
        if res["zpData"]["hasMore"] == False:
          detailFlag["finish"] = True
          raise FlowDone
        lst = extractPosition(res)
        global lineCount
        lineCount += len(lst)
        writer = csv.writer(f)
        writer.writerows(lst)
      case _:
        raise RuntimeError(res)


def run():
  """Main crawl loop.

  Iterates every (city, industry, position) cell from the data source,
  fetches up to `pageCount` pages per cell, and records progress in the
  global `job` dict so an interrupted crawl can resume where it stopped.
  """
  global job
  job = getJob()
  indices = DataSource.get_indices()

  logging.info("当前爬虫范围 {}".format(Conf.getConf().get("include")))

  # Create the CSV on first run, then write the header only when the file
  # is still empty (readline() returns "" at EOF of an empty file).
  if not os.path.exists(csvName):
    open(csvName, "w", encoding="utf8").close()

  with open(csvName, "r+", encoding="utf8") as f:
    if f.readline() == "":
      f.write("岗位名称,薪水,公司名,岗位类型,岗位大类,学历,经验,技能,城市,行业,企业规模\n")

  for c in indices["cities"]:
    city_code = c[0]
    city_name = c[1]
    for i in indices["industries"]:
      industry_code = i[0]
      industry_name = i[1]
      for p in indices["positions"]:
        # p[0] (the position-type code) is never needed for the request,
        # so it is deliberately not bound.
        positionType_name = p[1]
        position_code = p[2]
        position_name = p[3]

        detail = job[city_name][industry_name][positionType_name][position_name]
        global currentInfo
        currentInfo = f"{city_name}-{industry_name}行业-岗位大类:{positionType_name}-岗位类型:{position_name}"
        if detail["finish"]:
          logging.info(f"{currentInfo}已完成爬取, 跳过")
          continue

        logging.info(f"<<<<爬取{currentInfo}>>>>")

        while detail["curPage"] <= pageCount:
          logging.info("尝试爬取页面<{}>".format(detail["curPage"]))
          res = searchJobList(city_code, industry_code, position_code, detail["curPage"])
          try:
            zhipinHandler(res, detail)
          except FlowDone:
            logging.info(f"{currentInfo} 提前完成爬取,因为职位数不足")
            break
          # NOTE(review): curPage also advances when zhipinHandler handled a
          # code-37 token refresh, so that page's data is skipped, not retried.
          detail["curPage"] += 1
          # Randomised delay (uniform + gaussian jitter) to look less bot-like.
          time.sleep(random.random() * 1 + random.gauss(mean, sd))

        detail["finish"] = True
        logging.info(f"{currentInfo} 已完成爬取")
          


if __name__ == "__main__":
  # Script entry point. The assignments below populate the module-level
  # globals that the functions above read (cookies, headers, job, pageCount,
  # currentInfo, mean, sd, lineCount, csvName) — order matters: they must
  # exist before run() is called.
  beginTime = time.time()
  logging.basicConfig(level=logging.INFO)
  cookies = Conf.getConf()["cookies"]
  headers = Conf.getConf()["headers"]
  job = None # crawl progress; populated by run() via getJob()
  pageCount = Conf.getConf()["pageCount"]
  logging.info(f"{pageCount=}")
  currentInfo = "" # industry / position type / name label; global because extractPosition and logging read it
  mean = Conf.getConf()["speed"]["mean"]
  sd = Conf.getConf()["speed"]["sd"]
  lineCount = 0
  csvName = "positions.csv"
  
  
  try:
    run()
  finally:
    # Always persist progress and config — even on crash or Ctrl-C — so the
    # next run can resume where this one stopped.
    syncJob()
    Conf.saveConfig()
    endTime = time.time()
    totalTime = endTime-beginTime
    logging.info(f"一共爬取了{lineCount}行数据,耗时{totalTime/60:.2f}min, 速度{int(lineCount / totalTime * 60)}行/min")
  
