import scrapy
from scrapy import Selector, Request
from scrapy.http import HtmlResponse
from ..items import ReptileItem
from selenium.common import NoSuchElementException
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from datetime import time
from selenium.webdriver.chrome.options import Options
import pandas as pd

# Read url.csv to obtain each city's code and its page count, then build the search URLs.
# `city` defaults to the empty string; when empty, every city listed in url.csv is used.
def get_url(domain, city=""):
    """Build zhaopin.com search-result URLs from url.csv.

    url.csv must contain a ``city`` column (city name), a ``code`` column
    (zhaopin city code) and one column per job domain holding the number
    of result pages for that domain in that city.

    Args:
        domain: job field (e.g. "Java"); must be a column name in url.csv.
        city: city name; when empty (the default), URLs are generated for
            every city in url.csv. An unknown city yields an empty list.

    Returns:
        list[str]: one search URL per result page, per selected city.
    """
    df = pd.read_csv("url.csv")
    if city:
        # Keep only the requested city's row(s); an unknown city simply
        # leaves the frame empty (the original raised IndexError here).
        df = df.loc[df["city"] == city]

    urls = []
    # Walk the (code, page-count) pairs instead of indexing parallel Series.
    for code, pages in zip(df["code"], df[domain]):
        urls.extend(
            f'https://sou.zhaopin.com/?jl={code}&kw={domain}&p={page}'
            for page in range(1, int(pages) + 1)
        )
    return urls

# Browser options: strip the "Chrome is being controlled by automated
# software" banner and Chrome's console logging (reduces the chance the
# site detects automation).
options=Options()
options.add_experimental_option("excludeSwitches", ['enable-automation', 'enable-logging'])
# Module-level browser driver shared with ZhaopinSpider below.
# NOTE(review): launching Chrome at import time is a side effect; consider
# creating the driver inside the spider's start_requests instead.
web = Chrome(options=options)
# Choose exactly one job domain to crawl (must match a column in url.csv);
# the alternatives below are kept for convenience.
# urls = get_url("Hadoop")
# urls = get_url("Java")
# urls = get_url("python")
# urls = get_url("人工智能")
# urls = get_url("数据分析师")
# urls = get_url("数据可视化")
# urls = get_url("数据挖掘")
# urls = get_url("数据标注")
# urls = get_url("智能制造")
# urls = get_url("机器学习")
# urls = get_url("深度学习")
# urls = get_url("计算机视觉")
urls = get_url("机器视觉")

class ZhaopinSpider(scrapy.Spider):
    """Crawl zhaopin.com job listings after an SMS login via Selenium.

    Login happens once in ``start_requests`` through the module-level
    ``web`` Chrome driver; the resulting session cookies are attached to
    every search-page request built from the module-level ``urls``.
    """
    name = 'zhaopin'
    # NOTE(review): passport.zhaopin.com (the login/fallback URL below) is
    # not listed here, so the offsite middleware may filter the fallback
    # request -- confirm whether that branch is ever expected to fire.
    allowed_domains = ['sou.zhaopin.com']
    # Login page; bkUrl redirects back to a search page after login.
    start_urls = ['https://passport.zhaopin.com/login?bkUrl=https%3A%2F%2Fsou.zhaopin.com%2F%3Fkw%3DPython%26p%3D1']

    def start_requests(self):
        """Log in through Selenium, then schedule one request per search URL."""
        # BUG FIX: the original called time(1) with `from datetime import
        # time`, which merely constructs datetime.time(hour=1) and never
        # pauses. Use time.sleep via a local import to avoid clashing with
        # the module-level datetime import.
        from time import sleep

        web.get(self.start_urls[0])
        cookies_dict = {}
        try:
            # Selenium 4 removed find_element_by_xpath; use find_element + By.
            sleep(1)
            # Phone-number field. NOTE(review): "############" is a
            # hard-coded placeholder -- supply a real number or read it
            # from configuration before running.
            web.find_element(By.XPATH, '//*[@id="register-sms-1_input_1_phone"]').send_keys("############")
            sleep(1)
            # Request the SMS verification code.
            web.find_element(By.XPATH, '//*[@id="zpPassportWidgetContainer"]/div/div/div/div/div[2]/div/div/div/form/p_input-sms/div/div/button').click()
            # The operator types the received code on the console.
            code = input("请输入手机验证码：")
            web.find_element(By.XPATH, '//*[@id="register-sms-1_input_2_validate_code"]').send_keys(code)
            sleep(1)
            # Accept the user agreement.
            web.find_element(By.XPATH, '//*[@id="accept"]').click()
            sleep(1)
            # Submit the login form.
            web.find_element(By.XPATH, '//*[@id="zpPassportWidgetContainer"]/div/div/div/div/div[2]/div/div/div/p_submit/div/button').click()
            sleep(1)
            # Capture the logged-in session cookies for the Scrapy requests.
            cookies_dict = {cookie['name']: cookie['value'] for cookie in web.get_cookies()}
            web.close()

        except NoSuchElementException:
            # Login widget not found (page layout changed or already logged
            # in): retry the start URL without cookies.
            # BUG FIX: the original fell through to the loop below with
            # cookies_dict unbound (NameError); return after the fallback.
            yield scrapy.Request(url=self.start_urls[0], callback=self.parse)
            return

        for url in urls:
            yield scrapy.Request(url,
                                 cookies=cookies_dict,
                                 callback=self.parse)

    def parse(self, response):
        """Extract one ReptileItem per job card on a search-result page.

        Item fields: positionName, corporateName, corporateType,
        companySize, monthlyPay, address, workExperience, diploma,
        technology (list of skill tags).
        """
        sel = Selector(response)
        list_items = sel.xpath('//*[@id="positionList-hook"]/div/div')
        # The last two divs under the list container are not job cards.
        for list_item in list_items[:len(list_items) - 2]:
            zhaopin_item = ReptileItem()
            zhaopin_item['positionName'] = list_item.xpath('.//a/div[1]/div[1]/span/@title').extract_first()
            zhaopin_item['corporateName'] = list_item.xpath('.//a/div[1]/div[2]/span/text()').extract_first()
            zhaopin_item['corporateType'] = list_item.xpath('.//a/div[2]/div[2]/span[1]/text()').extract_first()
            zhaopin_item['companySize'] = list_item.xpath('.//a/div[2]/div[2]/span[2]/text()').extract_first()
            zhaopin_item['monthlyPay'] = list_item.xpath('.//a/div[2]/div[1]/p/text()').extract_first()
            zhaopin_item['address'] = list_item.xpath('.//a/div[2]/div[1]/ul/li[1]/text()').extract_first()
            zhaopin_item['workExperience'] = list_item.xpath('.//a/div[2]/div[1]/ul/li[2]/text()').extract_first()
            zhaopin_item['diploma'] = list_item.xpath('.//a/div[2]/div[1]/ul/li[3]/text()').extract_first()
            # BUG FIX: the original used an absolute XPath pointing at
            # div[1], so every item received the technology tags of the
            # FIRST job card; scope the query to the current card instead.
            zhaopin_item['technology'] = [
                tag.xpath('.//text()').extract_first()
                for tag in list_item.xpath('.//a/div[3]/div[1]/div')
            ]
            yield zhaopin_item
