import time

import yaml
from selenium.common import exceptions

from common.basePage import ConfigManager
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Keys
from selenium.webdriver.common.by import By
from common.chrome_driver import wd
from common.readyaml import read_yaml
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC, expected_conditions
from common.log_conf import logger
from soupsieve.css_types import Selector
import csv


def test_login_boss():
    """Open the BOSS Zhipin homepage and walk the WeChat QR-code login flow.

    Loads the Shanghai landing page, clicks the login entry, accepts the
    user agreement, switches to WeChat login, then blocks (up to 60 s) until
    the QR-code element disappears — i.e. the user has scanned it — before
    continuing.  Exits the script if a required element cannot be located.
    """
    wd.get(url='https://www.zhipin.com/shanghai/')
    logger.info('打开BOSS直聘登录页面')
    wd.maximize_window()
    logger.info('最大化窗口')
    wd.implicitly_wait(5)
    try:
        # Locate the login button on the homepage and click it.
        login_button = wd.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[1]/div[4]/div/a[4]')
        login_button.click()
        logger.info('点击登录')
        # Tick the user-agreement checkbox (required before any login method).
        agreement_checkbox = wd.find_element(By.CLASS_NAME, 'agree-policy')
        agreement_checkbox.click()
        logger.info('点击协议')
        # Switch to WeChat QR-code login.
        wechat_login = wd.find_element(By.CLASS_NAME, 'wx-login-btn')
        wechat_login.click()

        logger.info('选择微信登录')
        wd.implicitly_wait(2)
        wd.find_element(By.CLASS_NAME, 'mini-qrcode')
        logger.info('已经在二维码界面，等待扫码')
        logger.warn('等待扫码中，扫完后继续下一步')
        # The QR code vanishing from the DOM signals a successful scan.
        WebDriverWait(wd, 60).until_not(EC.presence_of_element_located((By.CLASS_NAME, 'mini-qrcode')))
        logger.warn('扫码完毕，5S后即将开始下一步')
        time.sleep(5)
    except NoSuchElementException as e:
        # FIX: '...' + e raised TypeError (str + exception); convert explicitly.
        logger.error('出错，元素未找到，执行退出，错误代码：' + str(e))
        quit()


def test_search_job():
    """Navigate to the job-recommendation page and search for the configured job.

    Reads the target job title and district from the YAML config
    (``read_yaml()['BOSS']``), types the job into the search bar, then clicks
    the preset district filter.  Each step waits explicitly for its element;
    failures are logged rather than raised so a human can intervene.
    """
    logger.info('即将开始岗位搜索,先停顿5S')
    time.sleep(5)  # BOSS refreshes aggressively right after login; give it 5 s to settle
    current_url = wd.current_url
    # Click through to the recommendation page via the avatar/label element.
    try:
        if current_url == 'https://www.zhipin.com/web/geek/job-recommend':
            wd.find_element(By.CLASS_NAME, 'label-text').click()
            logger.info('回调地址是{}，点击头像入推荐页'.format(current_url))
            time.sleep(30)  # let BOSS finish its burst of reloads before proceeding
        else:
            wd.find_element(By.CLASS_NAME, 'label-text').click()
            logger.info('回调地址是{}，点击头像进入推荐页'.format(current_url))
            time.sleep(30)  # let BOSS finish its burst of reloads before proceeding

    except Exception as e:
        logger.warn('点击首页功能未实现！错误代码:' + str(e))
    try:
        logger.warn('出现搜索框之前，不会进行下一步，如果不是在搜索界面，请点击首页，等待跳转')
        print('出现搜索框之前，不会进行下一步，如果不是在搜索界面，请点击首页，等待跳转')
        WebDriverWait(wd, 50).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#container > div.user-jobs-area '
                                                                                     '> div > '
                                                                                     'div.system-search-condition > '
                                                                                     'div.job-search-box.clearfix > '
                                                                                     'div > div > '
                                                                                     'div.input-wrap.input-wrap-text '
                                                                                     '> input')))
        searchbar = wd.find_element(By.CSS_SELECTOR, '#container > div.user-jobs-area > div > '
                                                     'div.system-search-condition > div.job-search-box.clearfix > div '
                                                     '> div > div.input-wrap.input-wrap-text > input')
        searchbar.send_keys(read_yaml()['BOSS']['job'], Keys.ENTER)
        logger.info('已经输入职业并且按了回车')
    # FIX: WebDriverWait raises TimeoutException, which NoSuchElementException
    # never caught — the timeout would crash the run instead of being logged.
    except (NoSuchElementException, exceptions.TimeoutException) as e:
        # No further explicit wait here: if the bar is absent after 50 s a human
        # has long since intervened.
        logger.warn('未找到搜索框，请注意代码稳定性以及反爬')
    try:
        # Explicit wait until the configured district option appears, then click it.
        setarea = read_yaml()['BOSS']['area_zone']
        WebDriverWait(wd, 50).until(
            expected_conditions.presence_of_element_located((By.XPATH, '//li[contains(text(),"{}")]'.format(setarea))))
        area_zone = wd.find_element(By.XPATH, '//li[contains(text(),"{}")]'.format(setarea))
        area_zone.click()
        logger.info('点击预设的区域{}'.format(setarea))
    # FIX: same issue — also catch the wait's TimeoutException.
    except (NoSuchElementException, exceptions.TimeoutException) as e:
        logger.error('未找到你所预设的工作地区，即将停止后续操作，请检查设置或者更新反爬机制')
        # quit()
    try:
        # Explicit wait until the selected-area label confirms the filter applied.
        WebDriverWait(wd, 50).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.selected-area-name')))
        wd.find_elements(By.CLASS_NAME, 'job-name')
        logger.info('已经切换到{}的岗位页面，即将开始保存数据'.format(read_yaml()['BOSS']['job']))
    except Exception as e:
        logger.warn('未找到区域设置信息，请检查，后面将继续尝试搜索，程序继续')


def test_job_info():
    """Scrape job cards from the result pages and append one CSV row per job.

    For each of the first two result pages: scrolls to load all cards, opens
    each card's detail page in a new tab, collects the listed fields, writes
    them to a dated CSV under ``.\\log\\``, and closes the tab.  The CSV file
    name is recorded in ``csv_filename.yaml`` so later steps can locate it.
    """
    # FIX: the CSV handle was opened and never closed; the with-block guarantees
    # the file is flushed and closed even if scraping raises midway.
    with open('.\\log\\{}\\{}.csv'.format(ConfigManager.CURRENT_DATE, ConfigManager.CURRENT_DATE_TIME), mode='a',
              encoding='utf-8', newline='') as f:
        # Hand the generated CSV file name to the YAML config for later reuse.
        csv_value = {
            'csv_name': '{}'.format(f.name)
        }
        # Persist the time-stamped CSV name into testdata/csv_filename.yaml.
        with open(ConfigManager.CSV_FILE_NAME_PATH, 'w') as write_yaml:
            yaml.dump(csv_value, write_yaml)
        logger.warn('CSV文件名已经写入testdata文件夹的csv_filename.yaml中')
        csv_writer = csv.DictWriter(f, fieldnames=["职位名称", "地区", "薪水", "工作年限-学历", "能力要求", "公司名字",
                                                   "公司介绍",
                                                   "福利待遇", "职位描述", "企业类型",
                                                   "工作地址",
                                                   "详情链接"])
        csv_writer.writeheader()  # header row
        logger.info('csv表头写入')
        a = 0  # pages visited so far; selects which "next page" anchor to click
        for page in range(1, 3):
            k = 0  # per-page row counter; each page shows 30 cards (Selenium is slow, so cap it)
            time.sleep(2)
            # Scroll via JS so lazily-rendered cards appear in the DOM.
            wd.execute_script('document.documentElement.scrollTop = document.documentElement.scrollHeight')
            li_lists = wd.find_elements(By.CSS_SELECTOR, '.job-card-wrapper')
            print('检测到{}条岗位数据'.format(len(li_lists)))
            logger.warn('检测到{}条岗位数据'.format(len(li_lists)))
            for li in li_lists:
                job_name = li.find_element(By.CLASS_NAME, 'job-name').text
                job_area = li.find_element(By.CLASS_NAME, 'job-area').text
                salary = li.find_element(By.CLASS_NAME, 'salary').text
                job_tag = li.find_element(By.CSS_SELECTOR,
                                          '.job-card-wrapper .job-card-left .tag-list').text.replace('\n', ',')
                job_ability = li.find_element(By.XPATH, './div[2]/ul').text
                company_name = li.find_element(By.CLASS_NAME, 'company-name').text
                welfare = li.find_element(By.CLASS_NAME, 'info-desc').text
                link = li.find_element(By.CLASS_NAME, 'job-card-left').get_attribute('href')

                # Open the detail page (JS click avoids overlay interception).
                clic = li.find_element(By.CSS_SELECTOR, '.job-card-left')
                wd.execute_script('arguments[0].click()', clic)
                # Switch to the newest window (the detail tab).
                wd.switch_to.window(wd.window_handles[-1])
                time.sleep(2)
                job_des = wd.find_element(By.XPATH, '//*[@id="main"]/div[3]/div/div[2]/div[1]/div[2]').text.replace(
                    '\n',
                    ' ')
                try:  # some companies have no description section
                    company_info = wd.find_element(By.CSS_SELECTOR,
                                                   '.job-body-wrapper .company-info-box .fold-text').text.replace('\n',
                                                                                                                  ' ')
                except NoSuchElementException:
                    company_info = '此公司没有介绍'
                try:
                    company_type = wd.find_element(By.CLASS_NAME, 'company-type').text.replace('企业类型\n', '')
                # Unified with the plain NoSuchElementException used elsewhere in this file
                # (exceptions.NoSuchElementException is the same class).
                except NoSuchElementException:
                    company_type = '此公司未设置企业类型'
                try:
                    address = wd.find_element(By.CLASS_NAME, 'location-address').text
                except NoSuchElementException as e:
                    print('该公司没有地址')
                    address = '该岗位未留地址，可能是猎头岗位'
                dic = {
                    "职位名称": job_name,
                    "地区": job_area,
                    "薪水": salary,
                    "工作年限-学历": job_tag,
                    "能力要求": job_ability,
                    "公司名字": company_name,
                    "公司介绍": company_info,
                    "福利待遇": welfare,
                    "职位描述": job_des,
                    "企业类型": company_type,
                    "工作地址": address,
                    "详情链接": link
                }
                # Persist one job per row.
                csv_writer.writerow(dic)
                logger.info('开始给CSV写入{}发布的岗位为{}的，位置在{}的岗位信息'.format(company_name, job_name, address))
                k += 1
                print(dic)
                wd.close()
                # Return to the results window (first handle).
                wd.switch_to.window(wd.window_handles[0])
                if k == 30:  # stop after 30 rows on this page
                    break
            time.sleep(2)
            a += 1
            # The "next page" button is hard to locate: XPATH a[10] works for
            # pages 1-4; from page 5 on the anchor shifts to a[11].
            if a <= 4:
                c = wd.find_element(By.XPATH, '//*[@class="options-pages"]/a[10]')
                wd.execute_script('arguments[0].click()', c)
            elif a < 7:
                c = wd.find_element(By.XPATH, '//*[@class="options-pages"]/a[11]')
                wd.execute_script('arguments[0].click()', c)
            else:
                c = wd.find_element(By.XPATH, '//*[@class="options-pages"]/a[10]')
                wd.execute_script('arguments[0].click()', c)
    # wd.close()
    # wd.quit()
# def test_chat_with_hr:
#     with open(ConfigManager.CSV_FILE_NAME_PATH,'r') as f:
#         f.read()
#         return test_chat_with_hr()
